| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (87–55.2k chars) | int64 (0–349) | string (135–49.1k chars) | int64 (0–349) | int64 (0–1) |
from sklearn.metrics import f1_score

import datasets
_DESCRIPTION = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights. Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
_CITATION = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    """F1 metric."""

    def _info(self) -> datasets.MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
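

# Minimal usage sketch (hypothetical, not part of the library sources):
# instantiating the deprecated class should emit a FutureWarning.
if __name__ == "__main__":
    import warnings as _warnings

    with _warnings.catch_warnings(record=True) as caught:
        _warnings.simplefilter("always")
        FlavaFeatureExtractor()
    assert any(issubclass(w.category, FutureWarning) for w in caught)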
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")


CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
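

# Hypothetical mini-demo of the checkpoint regex above, runnable without a
# repository checkout; the sample docstring line is made up for illustration.
def _demo_checkpoint_regex():
    sample = "See [bert-base-uncased](https://huggingface.co/bert-base-uncased) for an example."
    assert _re_checkpoint.findall(sample) == [
        ("bert-base-uncased", "https://huggingface.co/bert-base-uncased")
    ]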
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" BlenderbotSmall tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(self, vocab_file=None, merges_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>",
                 eos_token="<|endoftext|>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            ByteLevelBPETokenizer(vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space,
                                  trim_offsets=trim_offsets),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # BlenderbotSmall does not use token type ids, so the mask is all zeros.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
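

# Hypothetical usage sketch; downloading the checkpoint files requires
# network access to the HuggingFace Hub.
if __name__ == "__main__":
    tok = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
    print(tok("sample text")["input_ids"])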
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4,
                 decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1,
                 attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2,
                 scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1024,
                 **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
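

# Hypothetical usage sketch: build a default config and exercise the
# attribute aliasing declared in `attribute_map`.
if __name__ == "__main__":
    cfg = Speech2Text2Config()
    print(cfg.model_type)           # speech_to_text_2
    print(cfg.hidden_size)          # 256, resolved to `d_model` via `attribute_map`
    print(cfg.num_attention_heads)  # 4, resolved to `decoder_attention_heads`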
import math
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Returns the next prime at or after `factor * value`, skipping the starting value if it is already prime."""
    value = factor * value

    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
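

# Quick demo with assumed example values. `next_prime` resumes one past the
# starting value when that value is itself prime.
if __name__ == "__main__":
    print(is_prime(11))    # True
    print(next_prime(14))  # 17
    print(next_prime(13))  # 17, because 13 is prime the search resumes at 14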
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    """A pure implementation of patience sort algorithm in Python

    >>> patience_sort([1, 9, 5, 21, 17, 6])
    [1, 5, 6, 9, 17, 21]
    """
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element])
        i = bisect_left(stacks, new_stacks)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stacks)

    # use a heap-based merge to merge stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolves a force along rectangular components: (force, angle) => (force_x, force_y)."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """Check if a system is in static equilibrium: the net moment must vanish."""
    # the cross product of each position with its force gives that force's moment
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
UpperCAmelCase__ : int = array(
[
polar_force(718.4, 1_8_0 - 3_0),
polar_force(879.54, 4_5),
polar_force(1_0_0, -9_0),
]
)
UpperCAmelCase__ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
UpperCAmelCase__ : List[str] = array(
[
polar_force(3_0 * 9.81, 1_5),
polar_force(2_1_5, 1_8_0 - 4_5),
polar_force(2_6_4, 9_0 - 3_0),
]
)
UpperCAmelCase__ : Optional[int] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
UpperCAmelCase__ : str = array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]])
UpperCAmelCase__ : Tuple = array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
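
    # Extra demo (assumed values): a 10 N force at 45 degrees resolves into
    # nearly equal x and y components of about 7.07 N each.
    print(polar_force(10, 45))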
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__snake_case ="""\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__snake_case ="""\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__snake_case ="""
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__snake_case ="""\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
__snake_case ="""\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
__snake_case ="""
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """
    Computes F1 score and Exact Match for MultiRC predictions.
    """
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)

    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out["input_ids"].numpy().shape[1]
                assert out_length == max_length
'''simple docstring'''
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
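

# How the one-liner works: "%%" renders as a literal "%" and "%r" re-inserts
# the template with repr() quoting, so the printed text is a program that
# prints itself. The same trick in two steps:
template = "print((lambda quine: quine %% quine)(%r))"
print(template % template)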
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """
    Downloads and caches the prompt from a repo and returns its contents (if necessary).
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
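

# Hypothetical usage sketch: a prompt containing whitespace is returned as-is,
# while a bare repo ID (or None) triggers a Hub download via `cached_file`.
if __name__ == "__main__":
    print(download_prompt("Translate <<task>> into French.", agent_name="demo-agent"))
    # download_prompt(None, agent_name="demo-agent", mode="chat")  # needs network access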
'''simple docstring'''
import os
__snake_case ={"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1_000}
def a_ ( lowerCamelCase : str ):
lowerCAmelCase = 0
lowerCAmelCase = 0
while index < len(lowerCamelCase ) - 1:
lowerCAmelCase = SYMBOLS[numerals[index]]
lowerCAmelCase = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def a_ ( lowerCamelCase : int ):
lowerCAmelCase = ''
lowerCAmelCase = num // 1000
numerals += m_count * "M"
num %= 1000
lowerCAmelCase = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
lowerCAmelCase = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def a_ ( lowerCamelCase : str = "/p089_roman.txt" ):
lowerCAmelCase = 0
with open(os.path.dirname(lowerCamelCase ) + roman_numerals_filename ) as filea:
lowerCAmelCase = filea.readlines()
for line in lines:
lowerCAmelCase = line.strip()
lowerCAmelCase = parse_roman_numerals(lowerCamelCase )
lowerCAmelCase = generate_roman_numerals(lowerCamelCase )
savings += len(lowerCamelCase ) - len(lowerCamelCase )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
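
    # Round-trip demo (assumed example value); runs without the data file:
    print(parse_roman_numerals("MMXXIV"))  # 2024
    print(generate_roman_numerals(2024))   # MMXXIV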
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
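

# Hypothetical concrete reader, a minimal sketch of how the abstract base is
# meant to be subclassed; `InMemoryReader` is not part of the library.
class InMemoryReader(AbstractDatasetInputStream):
    def __init__(self, rows, features=None, **kwargs):
        super().__init__(features=features, **kwargs)
        self.rows = rows

    def read(self) -> Dataset:
        # materialize the rows eagerly; streaming options are ignored here
        return Dataset.from_list(self.rows)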
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def a_ ( lowerCamelCase : Dict , lowerCamelCase : Dict=1.0 , lowerCamelCase : List[Any]=None , lowerCamelCase : Union[str, Any]=None ):
if rng is None:
lowerCAmelCase = global_rng
lowerCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase_ ( unittest.TestCase ):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True, ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
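# Note (illustrative, not part of the upstream test): ASTFeatureExtractor always
# returns a log-mel spectrogram of shape (batch, 1024 time frames, 128 mel bins),
# padding or truncating along the time axis; that is what the shape assert checks.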
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the sorted runs input_list[low:mid] and input_list[mid:high + 1] in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Return a sorted copy of the input list, using bottom-up (iterative) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
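# Illustrative check (not part of the upstream file; assumes the definitions above).
# Bottom-up merge sort merges runs of width p, doubling p each pass: 2, 4, 8, ...
assert iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) == [1, 2, 5, 7, 7, 8, 9]
assert iter_merge_sort([]) == []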
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
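# Illustrative sketch (not part of the upstream test): the same pickling round-trip
# for a model prepared by Accelerator; `prepare` wraps objects, and that wrapping is
# what the test above guards against becoming unpicklable.
def _pickle_roundtrip_model_sketch():
    model = torch.nn.Linear(10, 10)
    accelerator = Accelerator()
    prepared = accelerator.prepare(model)
    restored = pickle.loads(pickle.dumps(prepared))
    assert isinstance(restored, torch.nn.Module)
    AcceleratorState._reset_state()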
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(self, vocab_size=21128, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, max_relative_position=64, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout=0.1, pad_token_id=0, bos_token_id=2, eos_token_id=3, use_cache=True, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
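# Illustrative usage (a minimal sketch, assuming a transformers installation that
# ships the Nezha model):
#
#   from transformers import NezhaConfig, NezhaModel
#   config = NezhaConfig(hidden_size=512, num_hidden_layers=8)
#   model = NezhaModel(config)  # randomly initialized with the custom sizes
#   assert config.model_type == "nezha"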
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
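# Illustrative usage (a minimal sketch, assuming the hub checkpoint is reachable):
#
#   tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#   ids = tokenizer("Hello, how are you?").input_ids
#   assert ids[-1] == tokenizer.eos_token_id  # build_inputs_with_special_tokens appends </s>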
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """Return all primes up to num, using the sieve of Eratosthenes."""
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
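# Illustrative check (not part of the upstream file; assumes the definition above).
# Marking composites from start * start upward gives the usual O(n log log n) sieve.
assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]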
from __future__ import annotations

from statistics import mean


def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting times under non-preemptive shortest-job-first scheduling."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes

    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []

    completed = 0
    total_time = 0

    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time is burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
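# Worked example (illustrative): with burst times [2, 5, 3, 7] and all arrivals at 0,
# the shortest-job-first order is P1 (2), P3 (3), P2 (5), P4 (7), so completion times
# are 2, 5, 10, 17; waiting times come out as [0, 5, 2, 10], turnaround times as
# [2, 10, 5, 17], and the average waiting time is 17 / 4 = 4.25, matching the test
# case printed above.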
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A : List[str] = logging.get_logger(__name__)
__A : int = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : int = "deta"
SCREAMING_SNAKE_CASE_ : List[str] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : Union[str, Any] , A : Optional[int]=None , A : Union[str, Any]=9_00 , A : Tuple=20_48 , A : int=6 , A : str=20_48 , A : Any=8 , A : Optional[int]=6 , A : Dict=10_24 , A : str=8 , A : Dict=0.0 , A : Union[str, Any]=True , A : List[Any]="relu" , A : Tuple=2_56 , A : Optional[int]=0.1 , A : int=0.0 , A : str=0.0 , A : List[Any]=0.02 , A : Union[str, Any]=1.0 , A : str=True , A : str=False , A : Optional[int]="sine" , A : Optional[Any]=5 , A : str=4 , A : Union[str, Any]=4 , A : Tuple=True , A : Union[str, Any]=3_00 , A : Optional[Any]=True , A : int=True , A : Dict=1 , A : Tuple=5 , A : Optional[Any]=2 , A : Optional[Any]=1 , A : Any=1 , A : int=5 , A : Optional[Any]=2 , A : List[str]=0.1 , A : Dict=0.25 , **A : Tuple , ) -> Dict:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowercase_ : Optional[int] = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
else:
if isinstance(A , A ):
lowercase_ : List[str] = backbone_config.pop('''model_type''' )
lowercase_ : List[str] = CONFIG_MAPPING[backbone_model_type]
lowercase_ : Union[str, Any] = config_class.from_dict(A )
lowercase_ : List[str] = backbone_config
lowercase_ : Optional[int] = num_queries
lowercase_ : str = max_position_embeddings
lowercase_ : Any = d_model
lowercase_ : Optional[Any] = encoder_ffn_dim
lowercase_ : List[str] = encoder_layers
lowercase_ : Dict = encoder_attention_heads
lowercase_ : int = decoder_ffn_dim
lowercase_ : List[Any] = decoder_layers
lowercase_ : int = decoder_attention_heads
lowercase_ : Optional[Any] = dropout
lowercase_ : Tuple = attention_dropout
lowercase_ : str = activation_dropout
lowercase_ : List[str] = activation_function
lowercase_ : int = init_std
lowercase_ : Dict = init_xavier_std
lowercase_ : List[Any] = encoder_layerdrop
lowercase_ : str = auxiliary_loss
lowercase_ : Dict = position_embedding_type
# deformable attributes
lowercase_ : Union[str, Any] = num_feature_levels
lowercase_ : Optional[int] = encoder_n_points
lowercase_ : Dict = decoder_n_points
lowercase_ : Tuple = two_stage
lowercase_ : Union[str, Any] = two_stage_num_proposals
lowercase_ : Tuple = with_box_refine
lowercase_ : Optional[int] = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
lowercase_ : Optional[Any] = class_cost
lowercase_ : Dict = bbox_cost
lowercase_ : Optional[int] = giou_cost
# Loss coefficients
lowercase_ : Optional[int] = mask_loss_coefficient
lowercase_ : Optional[Any] = dice_loss_coefficient
lowercase_ : Dict = bbox_loss_coefficient
lowercase_ : int = giou_loss_coefficient
lowercase_ : Union[str, Any] = eos_coefficient
lowercase_ : Dict = focal_alpha
super().__init__(is_encoder_decoder=A , **A )
@property
def A ( self : Any ) -> int:
return self.encoder_attention_heads
@property
def A ( self : Optional[int] ) -> int:
return self.d_model
def A ( self : List[Any] ) -> Dict:
lowercase_ : str = copy.deepcopy(self.__dict__ )
lowercase_ : Union[str, Any] = self.backbone_config.to_dict()
lowercase_ : List[Any] = self.__class__.model_type
return output
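# Illustrative usage (a minimal sketch):
#
#   config = DetaConfig()  # defaults to a ResNet backbone config
#   assert config.num_attention_heads == config.encoder_attention_heads  # attribute_map alias
#   assert config.hidden_size == config.d_model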
import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=15, output_type="np", use_karras_sigmas=True, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
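# Note (illustrative): set_scheduler takes k-diffusion sampler names; the tests above
# exercise "sample_euler" and "sample_dpmpp_2m", and use_karras_sigmas=True switches
# the sampler to the noise-level spacing proposed by Karras et al. (2022).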
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(self, parent, batch_size=2, image_size=32, patch_size=16, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, backbone_out_indices=[0, 1, 2, 3], num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, backbone_featmap_shape=[1, 384, 24, 24], is_hybrid=True, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
def A ( self : int ):
'''simple docstring'''
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def A ( self : str ):
'''simple docstring'''
UpperCAmelCase = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=lowercase , backbone_featmap_shape=self.backbone_featmap_shape , )
def A ( self : Optional[int] , lowercase : str , lowercase : Optional[Any] , lowercase : List[str] ):
'''simple docstring'''
UpperCAmelCase = DPTModel(config=lowercase )
model.to(lowercase )
model.eval()
UpperCAmelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[str] , lowercase : Tuple , lowercase : Optional[int] , lowercase : List[str] ):
'''simple docstring'''
UpperCAmelCase = self.num_labels
UpperCAmelCase = DPTForDepthEstimation(lowercase )
model.to(lowercase )
model.eval()
UpperCAmelCase = model(lowercase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def A ( self : int , lowercase : Dict , lowercase : Union[str, Any] , lowercase : int ):
'''simple docstring'''
UpperCAmelCase = self.num_labels
UpperCAmelCase = DPTForSemanticSegmentation(lowercase )
model.to(lowercase )
model.eval()
UpperCAmelCase = model(lowercase , labels=lowercase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)
def A ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def A ( self : List[str] ):
'''simple docstring'''
pass
def A ( self : int ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def A ( self : Any ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowercase )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase )
def A ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*lowercase )
def A ( self : Any ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowercase )
def A ( self : List[str] ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = True
if model_class in get_values(lowercase ):
continue
UpperCAmelCase = model_class(lowercase )
model.to(lowercase )
model.train()
UpperCAmelCase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
UpperCAmelCase = model(**lowercase ).loss
loss.backward()
def A ( self : str ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = False
UpperCAmelCase = True
if model_class in get_values(lowercase ) or not model_class.supports_gradient_checkpointing:
continue
UpperCAmelCase = model_class(lowercase )
model.to(lowercase )
model.gradient_checkpointing_enable()
model.train()
UpperCAmelCase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
UpperCAmelCase = model(**lowercase ).loss
loss.backward()
def A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = _config_zero_init(lowercase )
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(config=lowercase )
# Skip the check for the backbone
UpperCAmelCase = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
UpperCAmelCase = [f"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def A ( self : int ):
'''simple docstring'''
pass
@slow
def A ( self : Any ):
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
UpperCAmelCase = DPTModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def A ( self : str ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = '''add'''
with self.assertRaises(lowercase ):
UpperCAmelCase = DPTForDepthEstimation(lowercase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
def A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
UpperCAmelCase = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(lowercase )
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=lowercase , return_tensors='''pt''' ).to(lowercase )
# forward pass
with torch.no_grad():
UpperCAmelCase = model(**lowercase )
UpperCAmelCase = outputs.predicted_depth
# verify the predicted depth
UpperCAmelCase = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , lowercase )
UpperCAmelCase = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , lowercase , atol=1E-4 ) )
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
lowerCAmelCase = ds_args.padded_vocab_size
lowerCAmelCase = ds_args.max_position_embeddings
lowerCAmelCase = ds_args.hidden_size
lowerCAmelCase = ds_args.num_layers
lowerCAmelCase = ds_args.num_attention_heads
lowerCAmelCase = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
lowerCAmelCase = config.n_head
# The hidden_size per head.
lowerCAmelCase = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
lowerCAmelCase = input_state_dict['checkpoint_version']
else:
lowerCAmelCase = 0.0
# The model.
lowerCAmelCase = input_state_dict['model']
# The language model.
lowerCAmelCase = model['language_model']
# The embeddings.
lowerCAmelCase = lm['embedding']
# The word embeddings.
lowerCAmelCase = embeddings['word_embeddings']['weight']
# Truncate the embedding table to vocab_size rows.
lowerCAmelCase = word_embeddings[: config.vocab_size, :]
lowerCAmelCase = word_embeddings
# The position embeddings.
lowerCAmelCase = embeddings['position_embeddings']['weight']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
lowerCAmelCase = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
lowerCAmelCase = pos_embeddings
# The transformer.
lowerCAmelCase = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder']
# The regex to extract layer names.
lowerCAmelCase = re.compile(R'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' )
# The simple map of names for "automated" rules.
lowerCAmelCase = {
'attention.dense': '.attn.c_proj.',
'self_attention.dense': '.attn.c_proj.',
'mlp.dense_h_to_4h': '.mlp.c_fc.',
'mlp.dense_4h_to_h': '.mlp.c_proj.',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
lowerCAmelCase = layer_re.match(lowerCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
lowerCAmelCase = int(m.group(1 ) )
# The name of the operation.
lowerCAmelCase = m.group(2 )
# Is it a weight or a bias?
lowerCAmelCase = m.group(3 )
# The name of the layer.
lowerCAmelCase = f'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith('layernorm' ):
lowerCAmelCase = 'ln_1' if op_name.startswith('input' ) else 'ln_2'
lowerCAmelCase = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
lowerCAmelCase = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , lowerCamelCase , lowerCamelCase )
lowerCAmelCase = causal_mask
# Insert a "dummy" tensor for masked_bias.
lowerCAmelCase = torch.tensor(-1e4 , dtype=torch.floataa )
lowerCAmelCase = masked_bias
lowerCAmelCase = fix_query_key_value_ordering(lowerCamelCase , lowerCamelCase , 3 , lowerCamelCase , lowerCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
lowerCAmelCase = out_val.transpose(0 , 1 ).contiguous()
# Store.
lowerCAmelCase = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
lowerCAmelCase = fix_query_key_value_ordering(lowerCamelCase , lowerCamelCase , 3 , lowerCamelCase , lowerCamelCase )
# Store. No change of shape.
lowerCAmelCase = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
lowerCAmelCase = megatron_to_transformers[op_name]
lowerCAmelCase = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
lowerCAmelCase = megatron_to_transformers[op_name]
lowerCAmelCase = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
lowerCAmelCase = transformer['final_layernorm.weight']
lowerCAmelCase = transformer['final_layernorm.bias']
# For LM head, transformers' wants the matrix to weight embeddings.
lowerCAmelCase = word_embeddings
# It should be done!
return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
parser.add_argument('--print-checkpoint-structure' , action='store_true' )
parser.add_argument(
'path_to_checkpoint' , type=lowerCamelCase , help='Path to the checkpoint file (.zip archive or direct .pt file)' , )
parser.add_argument(
'--config_file' , default='' , type=lowerCamelCase , help='An optional config json file describing the pre-trained model.' , )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith('.zip' ):
with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint:
with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
lowerCAmelCase = 'gelu_fast'
elif ds_args.openai_gelu:
lowerCAmelCase = 'gelu_new'
else:
lowerCAmelCase = 'gelu'
else:
# in the very early days this used to be "gelu_new"
lowerCAmelCase = 'gelu_new'
# Spell out all parameters in case the defaults change.
lowerCAmelCase = GPTaConfig(
vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=lowerCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='cls_index' , summary_use_proj=lowerCamelCase , summary_activation=lowerCamelCase , summary_proj_to_labels=lowerCamelCase , summary_first_dropout=0.1 , scale_attn_weights=lowerCamelCase , use_cache=lowerCamelCase , bos_token_id=50256 , eos_token_id=50256 , )
else:
lowerCAmelCase = GPTaConfig.from_json_file(args.config_file )
lowerCAmelCase = ['GPT2LMHeadModel']
# Convert.
print('Converting' )
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class
# Store the config to file.
print('Saving config' )
    config.save_pretrained(basename)
# Save tokenizer based on args
print(f'''Adding {tokenizer_class} tokenizer files''' )
    tokenizer.save_pretrained(basename)
# Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'''Saving checkpoint to "{output_checkpoint_file}"''')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
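# Illustrative invocation (a minimal sketch; all paths are placeholders):
#
#   PYTHONPATH=/path/to/Megatron-LM python convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure \
#       /path/to/checkpoint/release/mp_rank_00/model_optim_rng.pt
#
# config.json, the tokenizer files and pytorch_model.bin are written next to the
# checkpoint (its containing directory).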
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a plain Python list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
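# Illustrative usage (not part of the upstream file; assumes the class above).
# The rear index wraps modulo n, so freed slots are reused in place.
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue(1).enqueue(2).enqueue(3)  # enqueue returns self, so calls chain
    assert queue.dequeue() == 1
    queue.enqueue(4)  # reuses the slot freed by the dequeue
    assert len(queue) == 3 and queue.first() == 2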
from __future__ import annotations

from typing import Any


class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
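# Numerical note (illustrative): sherman_morrison computes (A + u v^T)^(-1) from a
# known A^(-1) as A^(-1) - (A^(-1) u)(v^T A^(-1)) / (1 + v^T A^(-1) u), avoiding a
# fresh O(n^3) inversion; it returns None exactly when 1 + v^T A^(-1) u == 0, i.e.
# when A + u v^T is singular.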
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # Parse the command-line arguments into the three dataclasses defined above.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_xnli" , _lowerCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli" , model_args.language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        else:
            train_dataset = load_dataset(
                "xnli" , model_args.train_language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = train_dataset.features["label"].names
    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli" , model_args.language , split="validation" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = eval_dataset.features["label"].names
    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli" , model_args.language , split="test" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = predict_dataset.features["label"].names
    # Labels
    num_labels = len(label_list )
    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label={str(i ): label for i, label in enumerate(label_list )} , label2id={label: i for i, label in enumerate(label_list )} , finetuning_task="xnli" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    def preprocess_function(examples ):
        # Tokenize the texts
        return tokenizer(
            examples["premise"] , examples["hypothesis"] , padding=padding , max_length=data_args.max_seq_length , truncation=True , )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc="train dataset map pre-processing" ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on train dataset" , )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset ) ) , 3 ):
            logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc="validation dataset map pre-processing" ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on validation dataset" , )
    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset ) , data_args.max_predict_samples )
            predict_dataset = predict_dataset.select(range(max_predict_samples ) )
        with training_args.main_process_first(desc="prediction dataset map pre-processing" ):
            predict_dataset = predict_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on prediction dataset" , )
# Get the metric function
    metric = evaluate.load("xnli" )
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return metric.compute(predictions=preds , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics["train_samples"] = min(max_train_samples , len(train_dataset ) )
        trainer.save_model() # Saves the tokenizer too for easy upload
        trainer.log_metrics("train" , metrics )
        trainer.save_metrics("train" , metrics )
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate(eval_dataset=eval_dataset )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics["eval_samples"] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )
# Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***" )
        predictions , labels , metrics = trainer.predict(predict_dataset , metric_key_prefix="predict" )
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset )
        )
        metrics["predict_samples"] = min(max_predict_samples , len(predict_dataset ) )
        trainer.log_metrics("predict" , metrics )
        trainer.save_metrics("predict" , metrics )
        predictions = np.argmax(predictions , axis=1 )
        output_predict_file = os.path.join(training_args.output_dir , "predictions.txt" )
        if trainer.is_world_process_zero():
            with open(output_predict_file , "w" ) as writer:
                writer.write("index\tprediction\n" )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
                    writer.write(F"{index}\t{item}\n" )
if __name__ == "__main__":
main()
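# --- Illustrative sketch (added, not part of the original script) ---
# A minimal, self-contained example of the `compute_metrics` pattern used
# above; `FakeEvalPrediction` is a hypothetical stand-in for transformers'
# EvalPrediction namedtuple, and only numpy is assumed.
import collections
FakeEvalPrediction = collections.namedtuple("FakeEvalPrediction", ["predictions", "label_ids"])

def sketch_compute_metrics(p):
    # Turn per-class logits into label ids, then score exact matches.
    preds = np.argmax(p.predictions, axis=1)
    return {"accuracy": float((preds == p.label_ids).mean())}

assert sketch_compute_metrics(
    FakeEvalPrediction(np.array([[0.1, 0.9], [0.8, 0.2]]), np.array([1, 0]))
) == {"accuracy": 1.0}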
| 36 |
'''simple docstring'''
class UpperCAmelCase_ :
    def __init__( self , array : list[int] ) -> None:
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum( self , start : int , end : int ) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum( self , target_sum : int ) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
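# --- Illustrative usage sketch (added; exercises the class above) ---
ps = UpperCAmelCase_([1, 2, 3, 4] )
assert ps.get_sum(1 , 3 ) == 2 + 3 + 4  # inclusive range sum from two prefix sums
assert ps.contains_sum(5 )  # the contiguous slice [2, 3] sums to 5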
| 4 | 0 |
'''simple docstring'''
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowerCAmelCase = get_tests_dir('''fixtures/dummy-config.json''')
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self ) -> Optional[int]:
lowerCAmelCase__ : Any = 0
def UpperCAmelCase_ ( self ) -> List[Any]:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def UpperCAmelCase_ ( self ) -> List[str]:
lowerCAmelCase__ : Dict = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(__UpperCAmelCase ,__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> Optional[int]:
lowerCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase ,__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
lowerCAmelCase__ : Tuple = AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase ,__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
lowerCAmelCase__ : Optional[Any] = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(__UpperCAmelCase ,__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
lowerCAmelCase__ : List[str] = os.path.join(__UpperCAmelCase ,"""fake-roberta""" )
os.makedirs(__UpperCAmelCase ,exist_ok=__UpperCAmelCase )
with open(os.path.join(__UpperCAmelCase ,"""config.json""" ) ,"""w""" ) as f:
f.write(json.dumps({} ) )
lowerCAmelCase__ : int = AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertEqual(type(__UpperCAmelCase ) ,__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
try:
AutoConfig.register("""custom""" ,__UpperCAmelCase )
# Wrong model type will raise an error
with self.assertRaises(__UpperCAmelCase ):
AutoConfig.register("""model""" ,__UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCAmelCase ):
AutoConfig.register("""bert""" ,__UpperCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase__ : Any = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : int = AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase ,__UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def UpperCAmelCase_ ( self ) -> List[Any]:
with self.assertRaisesRegex(
__UpperCAmelCase ,"""bert-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase__ : List[Any] = AutoConfig.from_pretrained("""bert-base""" )
def UpperCAmelCase_ ( self ) -> List[str]:
with self.assertRaisesRegex(
__UpperCAmelCase ,R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCAmelCase__ : str = AutoConfig.from_pretrained(__UpperCAmelCase ,revision="""aaaaaa""" )
def UpperCAmelCase_ ( self ) -> Dict:
with self.assertRaisesRegex(
__UpperCAmelCase ,"""hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" ,):
lowerCAmelCase__ : List[Any] = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def UpperCAmelCase_ ( self ) -> List[str]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : List[str] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=__UpperCAmelCase )
self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = AutoConfig.from_pretrained(__UpperCAmelCase ,trust_remote_code=__UpperCAmelCase )
self.assertEqual(reloaded_config.__class__.__name__ ,"""NewModelConfig""" )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Union[str, Any] = '''new-model'''
try:
AutoConfig.register("""new-model""" ,__UpperCAmelCase )
# If remote code is not set, the default is to use local
lowerCAmelCase__ : Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
lowerCAmelCase__ : List[str] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=__UpperCAmelCase )
self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
lowerCAmelCase__ : str = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=__UpperCAmelCase )
self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 37 |
'''simple docstring'''
def get_demo_graph( index : Optional[Any] ):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def a_ ( graph : dict[int, list[int]] ):
    id_ = 0
    n = len(graph ) # No of vertices in graph
    low = [0] * n
    visited = [False] * n
    def dfs(at , parent , bridges , id_ ):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to , at , bridges , id_ )
                low[at] = min(low[at] , low[to] )
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at) )
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at] , low[to] )
    bridges = []
    for i in range(n ):
        if not visited[i]:
            dfs(i , -1 , bridges , id_ )
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
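# --- Illustrative usage sketch (added; calls the bridge finder above) ---
# On the path graph 0-1-2 every edge is a bridge.
assert set(a_({0: [1], 1: [0, 2], 2: [1]} )) == {(0, 1), (1, 2)}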
| 4 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
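# --- Illustrative sketch (added, not part of the original module) ---
# A minimal stand-in for the lazy-import idea used above (the real
# `_LazyModule` does considerably more): defer importing a submodule until
# one of its attributes is first accessed.
import importlib

class TinyLazyModule:
    def __init__(self, package, attr_to_module):
        self._package = package
        # e.g. {"XLMConfig": ".configuration_xlm"}
        self._attr_to_module = attr_to_module

    def __getattr__(self, attr):
        # Import lazily, then hand back the requested attribute.
        module = importlib.import_module(self._attr_to_module[attr], self._package)
        return getattr(module, attr)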
| 38 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case =logging.get_logger(__name__)
def rename_keys( state_dict : Any ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith('module.encoder' ):
            key = key.replace('module.encoder' , 'glpn.encoder' )
        if key.startswith('module.decoder' ):
            key = key.replace('module.decoder' , 'decoder.stages' )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed' ) + len('patch_embed' )]
            key = key.replace(f'''patch_embed{idx}''' , f'''patch_embeddings.{int(idx )-1}''' )
        if "norm" in key:
            key = key.replace('norm' , 'layer_norm' )
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
            key = key.replace(f'''layer_norm{idx}''' , f'''layer_norm.{int(idx )-1}''' )
        if "layer_norm1" in key:
            key = key.replace('layer_norm1' , 'layer_norm_1' )
        if "layer_norm2" in key:
            key = key.replace('layer_norm2' , 'layer_norm_2' )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('block' ) + len('block' )]
            key = key.replace(f'''block{idx}''' , f'''block.{int(idx )-1}''' )
        if "attn.q" in key:
            key = key.replace('attn.q' , 'attention.self.query' )
        if "attn.proj" in key:
            key = key.replace('attn.proj' , 'attention.output.dense' )
        if "attn" in key:
            key = key.replace('attn' , 'attention.self' )
        if "fc1" in key:
            key = key.replace('fc1' , 'dense1' )
        if "fc2" in key:
            key = key.replace('fc2' , 'dense2' )
        if "linear_pred" in key:
            key = key.replace('linear_pred' , 'classifier' )
        if "linear_fuse" in key:
            key = key.replace('linear_fuse.conv' , 'linear_fuse' )
            key = key.replace('linear_fuse.bn' , 'batch_norm' )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c' ) + len('linear_c' )]
            key = key.replace(f'''linear_c{idx}''' , f'''linear_c.{int(idx )-1}''' )
        if "bot_conv" in key:
            key = key.replace('bot_conv' , '0.convolution' )
        if "skip_conv1" in key:
            key = key.replace('skip_conv1' , '1.convolution' )
        if "skip_conv2" in key:
            key = key.replace('skip_conv2' , '2.convolution' )
        if "fusion1" in key:
            key = key.replace('fusion1' , '1.fusion' )
        if "fusion2" in key:
            key = key.replace('fusion2' , '2.fusion' )
        if "fusion3" in key:
            key = key.replace('fusion3' , '3.fusion' )
        if "fusion" in key and "conv" in key:
            key = key.replace('conv' , 'convolutional_layer' )
        if key.startswith('module.last_layer_depth' ):
            key = key.replace('module.last_layer_depth' , 'head.head' )
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v( state_dict : List[str] , config : str ):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
            kv_bias = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
            # next, add keys and values (in that order) to the state dict
            state_dict[f'''glpn.encoder.block.{i}.{j}.attention.self.key.weight'''] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f'''glpn.encoder.block.{i}.{j}.attention.self.key.bias'''] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f'''glpn.encoder.block.{i}.{j}.attention.self.value.weight'''] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f'''glpn.encoder.block.{i}.{j}.attention.self.value.bias'''] = kv_bias[config.hidden_sizes[i] :]
def prepare_img( ):
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_glpn_checkpoint( checkpoint_path , pytorch_dump_folder_path , push_to_hub=False , model_name=None ):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors='pt' ).pixel_values
    logger.info('Converting model...' )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device('cpu' ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # key and value matrices need special treatment
    read_in_k_v(state_dict , config )
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
        else:
            raise ValueError(f'''Unknown model name: {model_name}''' )
        expected_shape = torch.Size([1, 480, 640] )
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3] , expected_slice , atol=1e-4 )
        print('Looks ok!' )
    # finally, push to hub if required
    if push_to_hub:
        logger.info('Pushing model and image processor to the hub...' )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=True , )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
__snake_case =parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
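# --- Illustrative sketch (added, not part of the original script) ---
# The "block{idx}" -> "block.{idx-1}" renaming performed above, on a toy key
# (pure string manipulation, no model weights involved).
_key = "glpn.encoder.block1.0.attn.q.weight"
_idx = _key[_key.find("block") + len("block")]
assert _key.replace(f"block{_idx}", f"block.{int(_idx) - 1}") == "glpn.encoder.block.0.0.attn.q.weight"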
| 4 | 0 |
class RadixNode:
    """simple docstring"""
    def __init__( self , prefix = "" , is_leaf = False ):
        """simple docstring"""
        self.nodes = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix
    def match( self , word ):
        """simple docstring"""
        x = 0
        for q, w in zip(self.prefix , word ):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many( self , words ):
        """simple docstring"""
        for word in words:
            self.insert(word )
    def insert( self , word ):
        """simple docstring"""
        # Case 1: The word is the prefix of the node
        # Solution: We set the current node as a leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word , is_leaf=True )
        else:
            incoming_node = self.nodes[word[0]]
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word )
            # Case 4: The word is greater than the matching prefix
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string , False )
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word )
    def find( self , word ):
        """simple docstring"""
        incoming_node = self.nodes.get(word[0] , None )
        if not incoming_node:
            return False
        else:
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word )
    def delete( self , word ):
        """simple docstring"""
        incoming_node = self.nodes.get(word[0] , None )
        if not incoming_node:
            return False
        else:
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word )
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes ) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes ) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values() )[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes ) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values() )[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree( self , height = 0 ):
        """simple docstring"""
        if self.prefix != "":
            print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '' )
        for value in self.nodes.values():
            value.print_tree(height + 1 )
def test_trie( )-> bool:
    """simple docstring"""
    words = 'banana bananas bandana band apple all beast'.split()
    root = RadixNode()
    root.insert_many(words )
    assert all(root.find(word ) for word in words )
    assert not root.find('bandanas' )
    assert not root.find('apps' )
    root.delete('all' )
    assert not root.find('all' )
    root.delete('banana' )
    assert not root.find('banana' )
    assert root.find('bananas' )
    return True
def pytests( )-> None:
    """simple docstring"""
    assert test_trie()
def main( )-> None:
    """simple docstring"""
    root = RadixNode()
    words = 'banana bananas bandanas bandana band apple all beast'.split()
    root.insert_many(words )
    print('Words:' , words )
    print('Tree:' )
    root.print_tree()
if __name__ == "__main__":
main()
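# --- Illustrative usage sketch (added; uses the RadixNode class above) ---
_tree = RadixNode()
_tree.insert_many(["test", "team"] )
assert _tree.find("test" ) and _tree.find("team" ) and not _tree.find("tea" )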
| 39 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
@slow
    def test_xlm_roberta_base( self ) -> List[str]:
        model = XLMRobertaModel.from_pretrained('xlm-roberta-base' )
        input_ids = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )['last_hidden_state'].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
    @slow
    def test_xlm_roberta_large( self ) -> Tuple:
        model = XLMRobertaModel.from_pretrained('xlm-roberta-large' )
        input_ids = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )['last_hidden_state'].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
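# --- Illustrative sketch (added, not part of the original test) ---
# The slice-comparison pattern used above, on a made-up tensor; only torch is
# assumed and no model download is needed.
import torch as _torch
_hidden = _torch.zeros(1 , 12 , 768 )
_last_dim = _hidden[:, :, -1] # shape (1, 12): one value per token position
assert _torch.allclose(_hidden[:, :, -1] , _last_dim , atol=1E-3 )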
| 4 | 0 |
"""simple docstring"""
import math
def insertion_sort( array , start = 0 , end = 0 )-> list:
    '''simple docstring'''
    end = end or len(array )
    for i in range(start , end ):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify( array , index , heap_size )-> None: # Max Heap
    '''simple docstring'''
    largest = index
    left_index = 2 * index + 1 # Left Node
    right_index = 2 * index + 2 # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index] , array[largest] = array[largest], array[index]
        heapify(array , largest , heap_size )
def heap_sort( array )-> list:
    '''simple docstring'''
    n = len(array )
    for i in range(n // 2 , -1 , -1 ):
        heapify(array , i , n )
    for i in range(n - 1 , 0 , -1 ):
        array[0] , array[i] = array[i], array[0]
        heapify(array , 0 , i )
    return array
def median_of_3( array , first_index , middle_index , last_index )-> int:
    '''simple docstring'''
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition( array , low , high , pivot )-> int:
    '''simple docstring'''
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i] , array[j] = array[j], array[i]
        i += 1
def sort( array )-> list:
    '''simple docstring'''
    if len(array ) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array ) ) )
    size_threshold = 16
    return intro_sort(array , 0 , len(array ) , size_threshold , max_depth )
def intro_sort( array , start , end , size_threshold , max_depth )-> list:
    '''simple docstring'''
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array )
        max_depth -= 1
        pivot = median_of_3(array , start , start + ((end - start) // 2) + 1 , end - 1 )
        p = partition(array , start , end , pivot )
        intro_sort(array , p , end , size_threshold , max_depth )
        end = p
    return insertion_sort(array , start , end )
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by a comma : """).strip()
    unsorted = [float(item) for item in user_input.split(""",""")]
print(sort(unsorted))
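# --- Illustrative note (added): the recursion-depth cap computed in `sort`
# above before it falls back to heap sort, keeping the worst case O(n log n).
import math as _math
assert 2 * _math.ceil(_math.log2(100 ) ) == 14 # depth budget for 100 elements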
| 40 |
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize( example : Dict ):
    output = {}
    output['input_ids'] = tokenizer(example['content'] , truncation=False )['input_ids']
    output['ratio_char_token'] = len(example['content'] ) / len(output['input_ids'] )
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split="""train""")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
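# --- Illustrative sketch (added, not part of the original script) ---
# The characters-per-token ratio computed by `tokenize` above, with a toy
# whitespace "tokenizer" standing in for the trained AutoTokenizer.
_content = "def add(a, b): return a + b"
_ids = _content.split() # hypothetical tokenization
print(f"{len(_content) / len(_ids):.2f} characters per token")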
| 4 | 0 |
'''simple docstring'''
import os
def solution() -> Optional[Any]:
    with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as f:
        l = [] # noqa: E741
        for _ in range(20 ):
            l.append([int(x ) for x in f.readline().split()] )
    maximum = 0
    # right
    for i in range(20 ):
        for j in range(17 ):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17 ):
        for j in range(20 ):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17 ):
        for j in range(17 ):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17 ):
        for j in range(3 , 20 ):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
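# --- Illustrative sketch (added): the four-in-a-row product scan used by
# `solution` above, on a toy one-row grid.
_row = [2, 3, 4, 5, 6]
assert max(
    _row[j] * _row[j + 1] * _row[j + 2] * _row[j + 3] for j in range(len(_row ) - 3 )
) == 3 * 4 * 5 * 6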
| 41 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case =logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCAmelCase_ ( TrainingArguments ):
    sortish_sampler : bool = field(default=False , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
    predict_with_generate : bool = field(
        default=False , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
    generation_max_length : Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
                '''to the `max_length` value of the model configuration.'''
            )
        } , )
    generation_num_beams : Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
                '''to the `num_beams` value of the model configuration.'''
            )
        } , )
    generation_config : Optional[Union[str, Path, GenerationConfig]] = field(
        default=None , metadata={
            '''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
        } , )
    def to_dict( self ) -> List[str]:
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig ):
                d[k] = v.to_dict()
return d
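# --- Illustrative sketch (added): the recursive to_dict() pattern above, on a
# hypothetical minimal config object.
class _TinyGenConfig:
    def to_dict(self):
        return {"max_length": 128}

def _serialize(d):
    # Any value that itself exposes to_dict() gets serialized recursively.
    return {k: (v.to_dict() if hasattr(v, "to_dict") else v) for k, v in d.items()}

assert _serialize({"generation_config": _TinyGenConfig(), "num_beams": 4}) == {
    "generation_config": {"max_length": 128},
    "num_beams": 4,
}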
| 4 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowercase : Optional[int] = logging.get_logger(__name__)
def get_detr_config( model_name ) -> Optional[int]:
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-50' )
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-101' )
    else:
        raise ValueError('Model name should include either resnet50 or resnet101' )
    config = DetrConfig(use_timm_backbone=False , backbone_config=backbone_config )
    # set label attributes
    is_panoptic = 'panoptic' in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = 'huggingface/label-files'
    filename = 'coco-detection-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config, is_panoptic
def create_rename_keys( config ) -> Tuple:
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') )
rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') )
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') )
rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'transformer.encoder.layers.{i}.self_attn.out_proj.weight',
F'encoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias') )
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'transformer.decoder.layers.{i}.self_attn.out_proj.weight',
F'decoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
) )
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
) )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
] )
return rename_keys
def rename_key( state_dict , old , new ) -> Union[str, Any]:
    val = state_dict.pop(old )
    state_dict[new] = val
def read_in_q_k_v( state_dict , is_panoptic=False ) -> int:
    prefix = ''
    if is_panoptic:
        prefix = 'detr.'
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'encoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[F'encoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[F'encoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[F'encoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[F'encoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[F'encoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[F'decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[F'decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[F'decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[F'decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[F'decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            F'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
        in_proj_bias_cross_attn = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[F'decoder.layers.{i}.encoder_attn.q_proj.weight'] = in_proj_weight_cross_attn[:256, :]
        state_dict[F'decoder.layers.{i}.encoder_attn.q_proj.bias'] = in_proj_bias_cross_attn[:256]
        state_dict[F'decoder.layers.{i}.encoder_attn.k_proj.weight'] = in_proj_weight_cross_attn[256:512, :]
        state_dict[F'decoder.layers.{i}.encoder_attn.k_proj.bias'] = in_proj_bias_cross_attn[256:512]
        state_dict[F'decoder.layers.{i}.encoder_attn.v_proj.weight'] = in_proj_weight_cross_attn[-256:, :]
        state_dict[F'decoder.layers.{i}.encoder_attn.v_proj.bias'] = in_proj_bias_cross_attn[-256:]
def prepare_img( ) -> Optional[int]:
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_detr_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ) -> Optional[int]:
    config , is_panoptic = get_detr_config(model_name )
    # load original model from torch hub
    model_name_to_original_name = {
        'detr-resnet-50': 'detr_resnet50',
        'detr-resnet-101': 'detr_resnet101',
    }
    logger.info(F'Converting model {model_name}...' )
    detr = torch.hub.load('facebookresearch/detr' , model_name_to_original_name[model_name] , pretrained=True ).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config ):
        if is_panoptic:
            src = 'detr.' + src
        rename_key(state_dict , src , dest )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict , is_panoptic=is_panoptic )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'detr.model.' if is_panoptic else 'model.'
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('detr' )
                and not key.startswith('class_labels_classifier' )
                and not key.startswith('bbox_predictor' )
            ):
                val = state_dict.pop(key )
                state_dict['detr.model' + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key )
                state_dict['detr.' + key] = val
            elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
                continue
            else:
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
        else:
            if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config ) if is_panoptic else DetrForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    # verify our conversion on an image
    format = 'coco_panoptic' if is_panoptic else 'coco_detection'
    processor = DetrImageProcessor(format=format )
    encoding = processor(images=prepare_img() , return_tensors='pt' )
    pixel_values = encoding['pixel_values']
    original_outputs = detr(pixel_values )
    outputs = model(pixel_values )
    assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-3 )
    assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-3 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info('Uploading PyTorch model and image processor to the hub...' )
        model.push_to_hub(F'nielsr/{model_name}' )
        processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
lowercase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="detr-resnet-50",
type=str,
choices=["detr-resnet-50", "detr-resnet-101"],
help="Name of the DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
lowercase : Any = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
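# --- Illustrative sketch (added, not part of the original script) ---
# The q/k/v split performed in `read_in_q_k_v` above: PyTorch's
# MultiheadAttention stores the three projections as one (3*d, d) matrix.
import torch as _torch
_d = 256
_in_proj_weight = _torch.randn(3 * _d , _d )
_q , _k , _v = _in_proj_weight[:_d, :] , _in_proj_weight[_d : 2 * _d, :] , _in_proj_weight[-_d:, :]
assert _q.shape == _k.shape == _v.shape == (_d, _d)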
| 42 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__snake_case =logging.get_logger("""transformers.models.encodec""")
__snake_case ={
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
__snake_case ={
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
__snake_case ={
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
__snake_case ={
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
__snake_case ={
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
__snake_case ={
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__snake_case ={
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__snake_case =[]
__snake_case =[]
def a_ ( lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : List[str] ):
for attribute in key.split('.' ):
lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
if weight_type is not None:
lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase ).shape
else:
lowerCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowerCAmelCase = value
elif weight_type == "weight_g":
lowerCAmelCase = value
elif weight_type == "weight_v":
lowerCAmelCase = value
elif weight_type == "bias":
lowerCAmelCase = value
elif weight_type == "running_mean":
lowerCAmelCase = value
elif weight_type == "running_var":
lowerCAmelCase = value
elif weight_type == "num_batches_tracked":
lowerCAmelCase = value
elif weight_type == "weight_ih_l0":
lowerCAmelCase = value
elif weight_type == "weight_hh_l0":
lowerCAmelCase = value
elif weight_type == "bias_ih_l0":
lowerCAmelCase = value
elif weight_type == "bias_hh_l0":
lowerCAmelCase = value
elif weight_type == "weight_ih_l1":
lowerCAmelCase = value
elif weight_type == "weight_hh_l1":
lowerCAmelCase = value
elif weight_type == "bias_ih_l1":
lowerCAmelCase = value
elif weight_type == "bias_hh_l1":
lowerCAmelCase = value
else:
lowerCAmelCase = value
logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def a_ ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] ):
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
lowerCAmelCase , lowerCAmelCase = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : Any , lowerCamelCase : str ):
lowerCAmelCase = []
    if model_name in ["encodec_24khz", "encodec_32khz"]:
lowerCAmelCase = MAPPING_24K
elif model_name == "encodec_48khz":
lowerCAmelCase = MAPPING_48K
else:
raise ValueError(f'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(lowerCamelCase , lowerCamelCase ):
logger.info(f'''{name} was ignored''' )
continue
lowerCAmelCase = False
for key, mapped_key in MAPPING.items():
if "*" in key:
lowerCAmelCase , lowerCAmelCase = key.split('.*.' )
if prefix in name and suffix in name:
lowerCAmelCase = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
lowerCAmelCase = True
if "*" in mapped_key:
lowerCAmelCase = name.split(lowerCamelCase )[0].split('.' )[-2]
lowerCAmelCase = mapped_key.replace('*' , lowerCamelCase )
if "weight_g" in name:
lowerCAmelCase = 'weight_g'
elif "weight_v" in name:
lowerCAmelCase = 'weight_v'
elif "weight_ih_l0" in name:
lowerCAmelCase = 'weight_ih_l0'
elif "weight_hh_l0" in name:
lowerCAmelCase = 'weight_hh_l0'
elif "bias_ih_l0" in name:
lowerCAmelCase = 'bias_ih_l0'
elif "bias_hh_l0" in name:
lowerCAmelCase = 'bias_hh_l0'
elif "weight_ih_l1" in name:
lowerCAmelCase = 'weight_ih_l1'
elif "weight_hh_l1" in name:
lowerCAmelCase = 'weight_hh_l1'
elif "bias_ih_l1" in name:
lowerCAmelCase = 'bias_ih_l1'
elif "bias_hh_l1" in name:
lowerCAmelCase = 'bias_hh_l1'
elif "bias" in name:
lowerCAmelCase = 'bias'
elif "weight" in name:
lowerCAmelCase = 'weight'
elif "running_mean" in name:
lowerCAmelCase = 'running_mean'
elif "running_var" in name:
lowerCAmelCase = 'running_var'
elif "num_batches_tracked" in name:
lowerCAmelCase = 'num_batches_tracked'
else:
lowerCAmelCase = None
set_recursively(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
continue
if not is_used:
unused_weights.append(lowerCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def a_ ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : Dict=None , lowerCamelCase : Union[str, Any]=None , ):
if config_path is not None:
lowerCAmelCase = EncodecConfig.from_pretrained(lowerCamelCase )
else:
lowerCAmelCase = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
lowerCAmelCase = [8, 5, 4, 4]
lowerCAmelCase = [2.2]
lowerCAmelCase = 64
lowerCAmelCase = 32000
lowerCAmelCase = 2048
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
elif model_name == "encodec_48khz":
lowerCAmelCase = [8, 5, 4, 2]
lowerCAmelCase = [3.0, 6.0, 12.0, 24.0]
lowerCAmelCase = 48000
lowerCAmelCase = 2
lowerCAmelCase = False
lowerCAmelCase = 'time_group_norm'
lowerCAmelCase = True
lowerCAmelCase = 1.0
lowerCAmelCase = 0.01
else:
raise ValueError(f'''Unknown model name: {model_name}''' )
lowerCAmelCase = EncodecModel(lowerCamelCase )
lowerCAmelCase = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(lowerCamelCase )
lowerCAmelCase = torch.load(lowerCamelCase )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
lowerCAmelCase = original_checkpoint['best_state']
recursively_load_weights(lowerCamelCase , lowerCamelCase , lowerCamelCase )
model.save_pretrained(lowerCamelCase )
if repo_id:
print('Pushing to the hub...' )
feature_extractor.push_to_hub(lowerCamelCase )
model.push_to_hub(lowerCamelCase )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__snake_case =parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
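# A minimal, self-contained sketch of the ".*." wildcard renaming idea behind
# the mapping tables above: split a mapping key into prefix and suffix, recover
# the layer index that the "*" stands for, and substitute it into the target
# name. `toy_mapping` and `rename_keys` are illustrative names, not part of the
# conversion script.
toy_mapping = {"encoder.model.*.conv.conv": "encoder.layers.*.conv"}

def rename_keys(state_dict: dict) -> dict:
    renamed = {}
    for name, value in state_dict.items():
        for key, mapped_key in toy_mapping.items():
            prefix, suffix = key.split(".*.")
            if name.startswith(prefix) and name.endswith(suffix):
                index = name[len(prefix) + 1 :].split(".")[0]
                renamed[mapped_key.replace("*", index)] = value
                break
        else:
            renamed[name] = value  # keys matching no pattern are kept as-is
    return renamed

print(rename_keys({"encoder.model.7.conv.conv": "w"}))  # {'encoder.layers.7.conv': 'w'}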
| 4 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class lowerCamelCase_ ( datasets.BeamBasedBuilder ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> Union[str, Any]:
return datasets.DatasetInfo(
features=datasets.Features({'''content''': datasets.Value('''string''')}) , supervised_keys=__lowercase , )
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Optional[int]:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()})]
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> List[str]:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__lowercase)
class lowerCamelCase_ ( datasets.BeamBasedBuilder ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> Dict:
return datasets.DatasetInfo(
features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''')})}) , supervised_keys=__lowercase , )
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Dict:
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()})
]
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Optional[int]:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__lowercase)
def lowerCamelCase ( ):
'''simple docstring'''
return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
def lowerCamelCase ( ):
'''simple docstring'''
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
@require_beam
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Any = len(get_test_dummy_examples())
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__UpperCamelCase :Optional[int] = DummyBeamDataset(cache_dir=__lowercase , beam_runner='''DirectRunner''')
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(__lowercase , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""")))
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''')}))
__UpperCamelCase :str = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , __lowercase)
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __lowercase)
self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1])
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1])
self.assertTrue(
os.path.exists(os.path.join(__lowercase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''')))
del dset
@require_beam
def UpperCamelCase__ ( self) -> Any:
import apache_beam as beam
__UpperCamelCase :int = beam.io.parquetio.WriteToParquet
__UpperCamelCase :Optional[int] = len(get_test_dummy_examples())
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__UpperCamelCase :Optional[int] = DummyBeamDataset(cache_dir=__lowercase , beam_runner='''DirectRunner''')
with patch('''apache_beam.io.parquetio.WriteToParquet''') as write_parquet_mock:
__UpperCamelCase :List[Any] = partial(__lowercase , num_shards=2)
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
__lowercase , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00000-of-00002.arrow""")))
self.assertTrue(
os.path.exists(
os.path.join(
                        __lowercase , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00001-of-00002.arrow""")))
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''')}))
__UpperCamelCase :Dict = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , __lowercase)
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __lowercase)
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['''train''']['''content''']) , sorted(['''foo''', '''bar''', '''foobar''']))
self.assertTrue(
os.path.exists(os.path.join(__lowercase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''')))
del dset
@require_beam
def UpperCamelCase__ ( self) -> List[Any]:
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__UpperCamelCase :Optional[int] = DummyBeamDataset(cache_dir=__lowercase)
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare)
@require_beam
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :Dict = len(get_test_nested_examples())
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__UpperCamelCase :Tuple = NestedBeamDataset(cache_dir=__lowercase , beam_runner='''DirectRunner''')
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(__lowercase , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""")))
self.assertDictEqual(
builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''')})}))
__UpperCamelCase :Union[str, Any] = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , __lowercase)
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __lowercase)
self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1])
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1])
self.assertTrue(
os.path.exists(os.path.join(__lowercase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''')))
del dset
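# The tests above run builders on Apache Beam's in-process DirectRunner, the
# same runner passed as beam_runner='DirectRunner'. A minimal standalone
# pipeline on that runner (assumes apache_beam is installed):
import apache_beam as beam

with beam.Pipeline(runner="DirectRunner") as pipeline:
    (
        pipeline
        | "Create" >> beam.Create(["foo", "bar", "foobar"])  # three elements
        | "Upper" >> beam.Map(str.upper)                     # transform each
        | "Print" >> beam.Map(print)                         # side-effect sink
    )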
| 43 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__snake_case =logging.get_logger(__name__)
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : Dict , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : List[str] ) -> None:
warnings.warn(
'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use FlavaImageProcessor instead.' , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 4 | 0 |
"""simple docstring"""
from math import factorial
class __A :
def __init__( self , a__ , a__ ):
_lowerCAmelCase : Dict = real
if isinstance(a__ , a__ ):
_lowerCAmelCase : int = [1] * rank
else:
_lowerCAmelCase : int = rank
def __repr__( self ):
return (
F"{self.real}+"
F"{'+'.join(str(a__ )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"
)
def __A ( self ):
_lowerCAmelCase : str = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , a__ )
def __add__( self , a__ ):
if not isinstance(a__ , a__ ):
return Dual(self.real + other , self.duals )
_lowerCAmelCase : List[str] = self.duals.copy()
_lowerCAmelCase : Optional[int] = other.duals.copy()
if len(a__ ) > len(a__ ):
o_dual.extend([1] * (len(a__ ) - len(a__ )) )
elif len(a__ ) < len(a__ ):
s_dual.extend([1] * (len(a__ ) - len(a__ )) )
_lowerCAmelCase : List[str] = []
for i in range(len(a__ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , a__ )
_UpperCamelCase : Optional[Any] = __add__
def __sub__( self , a__ ):
return self + other * -1
def __mul__( self , a__ ):
if not isinstance(a__ , a__ ):
_lowerCAmelCase : Any = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , a__ )
_lowerCAmelCase : Union[str, Any] = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , a__ )
_UpperCamelCase : Optional[Any] = __mul__
def __truediv__( self , a__ ):
if not isinstance(a__ , a__ ):
_lowerCAmelCase : Tuple = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , a__ )
raise ValueError
def __floordiv__( self , a__ ):
if not isinstance(a__ , a__ ):
_lowerCAmelCase : List[str] = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , a__ )
raise ValueError
    def __pow__( self , n ):
        if n < 0 or isinstance(n , float ):
            raise ValueError("""power must be a positive integer""" )
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1 ):
            x *= self
        return x
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : int ,_lowerCamelCase : List[str] ) -> Tuple:
if not callable(_lowerCamelCase ):
raise ValueError("""differentiate() requires a function as input for func""" )
if not isinstance(_lowerCamelCase ,(float, int) ):
raise ValueError("""differentiate() requires a float as input for position""" )
if not isinstance(_lowerCamelCase ,_lowerCamelCase ):
raise ValueError("""differentiate() requires an int as input for order""" )
_lowerCAmelCase : int = Dual(_lowerCamelCase ,1 )
_lowerCAmelCase : str = func(_lowerCamelCase )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
    def f(y: float) -> float:
        return y**2 * y**4

    print(SCREAMING_SNAKE_CASE(f, 9, 2))
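# Dual numbers give forward-mode differentiation: each value carries a
# derivative that the product rule propagates through arithmetic. A minimal
# first-order sketch, independent of the class above (`TinyDual` is an
# illustrative name):
class TinyDual:
    def __init__(self, value: float, deriv: float) -> None:
        self.value, self.deriv = value, deriv

    def __mul__(self, other: "TinyDual") -> "TinyDual":
        # Product rule: (uv)' = u'v + uv'
        return TinyDual(
            self.value * other.value,
            self.deriv * other.value + self.value * other.deriv,
        )

x = TinyDual(2.0, 1.0)   # seed: dx/dx = 1
y = x * x * x            # y = x**3
print(y.value, y.deriv)  # 8.0 12.0, matching 3 * x**2 at x = 2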
| 44 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
__snake_case ={
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
__snake_case ={
"""facebook/blenderbot_small-90M""": 512,
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Tuple = VOCAB_FILES_NAMES
lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = BlenderbotSmallTokenizer
def __init__( self : Any , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : int="<|endoftext|>" , UpperCAmelCase__ : Dict="<|endoftext|>" , UpperCAmelCase__ : str="<|endoftext|>" , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Tuple=True , **UpperCAmelCase__ : Optional[Any] , ) -> Any:
super().__init__(
ByteLevelBPETokenizer(
vocab=UpperCAmelCase__ , merges=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ , ) , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , **UpperCAmelCase__ , )
lowerCAmelCase = add_prefix_space
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict=None ) -> Any:
lowerCAmelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
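# The two methods above wrap sequences in BOS/EOS markers and return an
# all-zeros token type mask. A toy trace of the resulting layout (the ids
# below are hypothetical, not this tokenizer's real special-token ids):
bos, eos = 0, 2
tokens_a, tokens_b = [10, 11], [20]

single = [bos] + tokens_a + [eos]
pair = single + [eos] + tokens_b + [eos]
print(single)           # [0, 10, 11, 2]
print(pair)             # [0, 10, 11, 2, 2, 20, 2]
print([0] * len(pair))  # the token type ids are all zero for this layout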
| 4 | 0 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : float ) -> float:
return 10 - x * x
def lowercase ( lowerCAmelCase__ : float , lowerCAmelCase__ : float ) -> float:
# Bolzano theory in order to find if there is a root between a and b
if equation(lowerCAmelCase__ ) * equation(lowerCAmelCase__ ) >= 0:
raise ValueError('''Wrong space!''' )
__a = a
while (b - a) >= 0.01:
# Find middle point
__a = (a + b) / 2
# Check if middle point is root
if equation(lowerCAmelCase__ ) == 0.0:
break
# Decide the side to repeat the steps
if equation(lowerCAmelCase__ ) * equation(lowerCAmelCase__ ) < 0:
__a = c
else:
__a = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
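# Bisection halves the bracket each step, so reaching the 0.01 stopping width
# from an initial bracket [a, b] takes about log2((b - a) / 0.01) iterations.
# A quick sanity check for the first call above (illustrative only):
import math

a, b, tol = -2.0, 5.0, 0.01
print(math.ceil(math.log2((b - a) / tol)))  # 10 halvings of [-2, 5] suffice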
| 45 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Union[str, Any] = '''speech_to_text_2'''
lowerCamelCase : Any = ['''past_key_values''']
lowerCamelCase : Optional[Any] = {'''num_attention_heads''': '''decoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[Any]=1_0_0_0_0 , UpperCAmelCase__ : int=6 , UpperCAmelCase__ : Optional[Any]=2_0_4_8 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str="relu" , UpperCAmelCase__ : Any=2_5_6 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : int=1_0_2_4 , **UpperCAmelCase__ : Optional[Any] , ) -> Dict:
lowerCAmelCase = vocab_size
lowerCAmelCase = d_model
lowerCAmelCase = decoder_ffn_dim
lowerCAmelCase = decoder_layers
lowerCAmelCase = decoder_attention_heads
lowerCAmelCase = dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = activation_dropout
lowerCAmelCase = activation_function
lowerCAmelCase = init_std
lowerCAmelCase = decoder_layerdrop
lowerCAmelCase = use_cache
lowerCAmelCase = decoder_layers
lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase = max_target_positions
super().__init__(
pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
| 4 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class lowercase ( _UpperCAmelCase ):
def __init__( self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , **lowercase , ) -> Tuple:
lowerCAmelCase = path_or_paths
lowerCAmelCase = split if split or isinstance(lowercase , lowercase ) else """train"""
lowerCAmelCase = features
lowerCAmelCase = cache_dir
lowerCAmelCase = keep_in_memory
lowerCAmelCase = streaming
lowerCAmelCase = num_proc
lowerCAmelCase = kwargs
@abstractmethod
def _snake_case ( self ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
pass
class lowercase ( _UpperCAmelCase ):
def __init__( self , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , **lowercase , ) -> str:
lowerCAmelCase = features
lowerCAmelCase = cache_dir
lowerCAmelCase = keep_in_memory
lowerCAmelCase = streaming
lowerCAmelCase = num_proc
lowerCAmelCase = kwargs
@abstractmethod
def _snake_case ( self ) -> Union[Dataset, IterableDataset]:
pass
| 46 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class UpperCAmelCase_ ( __lowercase ):
def __lt__( self : Optional[int] , UpperCAmelCase__ : List[str] ) -> List[Any]:
return self[-1] < other[-1]
def __eq__( self : str , UpperCAmelCase__ : List[str] ) -> Tuple:
return self[-1] == other[-1]
def a_ ( lowerCamelCase : list ):
lowerCAmelCase = []
# sort into stacks
for element in collection:
lowerCAmelCase = Stack([element] )
lowerCAmelCase = bisect_left(lowerCamelCase , lowerCamelCase )
if i != len(lowerCamelCase ):
stacks[i].append(lowerCamelCase )
else:
stacks.append(lowerCamelCase )
# use a heap-based merge to merge stack efficiently
lowerCAmelCase = merge(*(reversed(lowerCamelCase ) for stack in stacks) )
return collection
if __name__ == "__main__":
__snake_case =input("""Enter numbers separated by a comma:\n""").strip()
__snake_case =[int(item) for item in user_input.split(""",""")]
print(patience_sort(unsorted))
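# The same pile-building step (binary search for the leftmost stack whose top
# is not smaller) is also the classic O(n log n) route to the length of the
# longest increasing subsequence. A minimal sketch, separate from the sorting
# code above:
from bisect import bisect_left

def lis_length(seq: list) -> int:
    tops = []  # each pile is represented only by its top card
    for x in seq:
        i = bisect_left(tops, x)
        if i == len(tops):
            tops.append(x)  # start a new pile; LIS grows by one
        else:
            tops[i] = x     # place on an existing pile
    return len(tops)

print(lis_length([5, 1, 4, 2, 3]))  # 3, e.g. the subsequence (1, 2, 3)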
| 4 | 0 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
lowerCamelCase : List[str] = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
lowerCamelCase : Any = 1_0
lowerCamelCase : Tuple = 2_5_6
def _lowerCAmelCase ( _UpperCamelCase : List[str] ) -> Optional[MinHash]:
"""simple docstring"""
if len(_UpperCamelCase ) < MIN_NUM_TOKENS:
return None
_SCREAMING_SNAKE_CASE =MinHash(num_perm=_UpperCamelCase )
for token in set(_UpperCamelCase ):
min_hash.update(token.encode() )
return min_hash
def _lowerCAmelCase ( _UpperCamelCase : str ) -> Set[str]:
"""simple docstring"""
return {t for t in NON_ALPHA.split(_UpperCamelCase ) if len(t.strip() ) > 0}
class A__ :
def __init__( self : Dict , *,
_a : float = 0.85 , ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =duplication_jaccard_threshold
_SCREAMING_SNAKE_CASE =NUM_PERM
_SCREAMING_SNAKE_CASE =MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
_SCREAMING_SNAKE_CASE =defaultdict(_a )
def A ( self : Tuple , _a : Tuple , _a : MinHash ) -> None:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self._index.query(_a )
if code_key in self._index.keys:
print(f"Duplicate key {code_key}" )
return
self._index.insert(_a , _a )
if len(_a ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(_a )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(_a )
def A ( self : int ) -> List[List[Dict]]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =[]
for base, duplicates in self._duplicate_clusters.items():
_SCREAMING_SNAKE_CASE =[base] + list(_a )
# reformat the cluster to be a list of dict
_SCREAMING_SNAKE_CASE =[{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
duplicate_clusters.append(_a )
return duplicate_clusters
def A ( self : Union[str, Any] , _a : Optional[Any] ) -> None:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_duplicate_clusters()
with open(_a , 'w' ) as f:
json.dump(_a , _a )
def _lowerCAmelCase ( _UpperCamelCase : Tuple ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =element
_SCREAMING_SNAKE_CASE =get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def _lowerCAmelCase ( _UpperCamelCase : Type[Dataset] ) -> List[Any]:
"""simple docstring"""
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(_UpperCamelCase , max_queue_size=1_00_00 ) , chunksize=1_00 , ):
if data is not None:
yield data
def _lowerCAmelCase ( _UpperCamelCase : Type[Dataset] , _UpperCamelCase : float ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =DuplicationIndex(duplication_jaccard_threshold=_UpperCamelCase )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_UpperCamelCase ) ) , max_queue_size=1_00 ) ):
di.add(_UpperCamelCase , _UpperCamelCase )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def _lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : str ) -> float:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =get_tokens(_UpperCamelCase )
_SCREAMING_SNAKE_CASE =get_tokens(_UpperCamelCase )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
lowerCamelCase : Union[str, Any] = None
def _lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
for elementa in cluster:
_SCREAMING_SNAKE_CASE =_shared_dataset[elementa['base_index']]['content']
for elementa in extremes:
_SCREAMING_SNAKE_CASE =_shared_dataset[elementa['base_index']]['content']
if jaccard_similarity(_UpperCamelCase , _UpperCamelCase ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
_SCREAMING_SNAKE_CASE =1
extremes.append(_UpperCamelCase )
return extremes
def _lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : str , _UpperCamelCase : Any ) -> Optional[Any]:
"""simple docstring"""
global _shared_dataset
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =partial(_find_cluster_extremes_shared , jaccard_threshold=_UpperCamelCase )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_UpperCamelCase , _UpperCamelCase , ) , total=len(_UpperCamelCase ) , ):
extremes_list.append(_UpperCamelCase )
return extremes_list
def _lowerCAmelCase ( _UpperCamelCase : Type[Dataset] , _UpperCamelCase : float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =make_duplicate_clusters(_UpperCamelCase , _UpperCamelCase )
_SCREAMING_SNAKE_CASE ={x['base_index'] for cluster in duplicate_clusters for x in cluster}
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =find_extremes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
for extremes in extremes_clusters:
for element in extremes:
_SCREAMING_SNAKE_CASE =element
_SCREAMING_SNAKE_CASE =duplicate_indices - set(extreme_dict.keys() )
_SCREAMING_SNAKE_CASE =dataset.filter(lambda _UpperCamelCase , _UpperCamelCase : idx not in remove_indices , with_indices=_UpperCamelCase )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
_SCREAMING_SNAKE_CASE =element['base_index'] in extreme_dict
if element["is_extreme"]:
_SCREAMING_SNAKE_CASE =extreme_dict[element['base_index']]['copies']
print(f"Original dataset size: {len(_UpperCamelCase )}" )
print(f"Number of duplicate clusters: {len(_UpperCamelCase )}" )
print(f"Files in duplicate cluster: {len(_UpperCamelCase )}" )
print(f"Unique files in duplicate cluster: {len(_UpperCamelCase )}" )
print(f"Filtered dataset size: {len(_UpperCamelCase )}" )
return ds_filter, duplicate_clusters
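# The pipeline above works because a MinHash signature approximates the
# Jaccard similarity of the underlying token sets. A minimal check of that
# estimate with datasketch (the token sets and num_perm are arbitrary toy
# values):
from datasketch import MinHash

def minhash_of(tokens: set, num_perm: int = 256) -> MinHash:
    m = MinHash(num_perm=num_perm)
    for token in tokens:
        m.update(token.encode())
    return m

a = {"def", "foo", "return", "1", "pass"}
b = {"def", "foo", "return", "2", "pass"}
exact = len(a & b) / len(a | b)  # 4 / 6 ~= 0.667
print(exact, minhash_of(a).jaccard(minhash_of(b)))  # estimate lands near 0.667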
| 47 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__snake_case ="""\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__snake_case ="""\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account.
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__snake_case ="""
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
def __UpperCAmelCase ( self : Tuple ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : Optional[int]=False ) -> int:
lowerCAmelCase = compute_bleu(
reference_corpus=UpperCAmelCase__ , translation_corpus=UpperCAmelCase__ , max_order=UpperCAmelCase__ , smooth=UpperCAmelCase__ )
((lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase)) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
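# Of the fields returned above, the brevity penalty is the easiest to
# reproduce by hand: exp(1 - reference_length / translation_length) when the
# translation is shorter than the reference, and 1 otherwise. A quick sanity
# check (illustrative only):
import math

def brevity_penalty(translation_length: int, reference_length: int) -> float:
    if translation_length >= reference_length:
        return 1.0
    return math.exp(1 - reference_length / translation_length)

print(brevity_penalty(9, 10))   # ~0.8948
print(brevity_penalty(10, 10))  # 1.0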
| 4 | 0 |
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ ) -> List[Any]:
lowerCamelCase : int = val
lowerCamelCase : Optional[int] = None
lowerCamelCase : Any = None
def _lowercase ( self , UpperCamelCase__ ) -> List[str]:
if self.val:
if val < self.val:
if self.left is None:
lowerCamelCase : Union[str, Any] = Node(UpperCamelCase__ )
else:
self.left.insert(UpperCamelCase__ )
elif val > self.val:
if self.right is None:
lowerCamelCase : Optional[Any] = Node(UpperCamelCase__ )
else:
self.right.insert(UpperCamelCase__ )
else:
lowerCamelCase : List[str] = val
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[int]:
# Recursive traversal
if root:
inorder(root.left ,_SCREAMING_SNAKE_CASE )
res.append(root.val )
inorder(root.right ,_SCREAMING_SNAKE_CASE )
def A ( _SCREAMING_SNAKE_CASE ) -> Dict:
# Build BST
if len(_SCREAMING_SNAKE_CASE ) == 0:
return arr
lowerCamelCase : List[Any] = Node(arr[0] )
for i in range(1 ,len(_SCREAMING_SNAKE_CASE ) ):
root.insert(arr[i] )
# Traverse BST in order.
lowerCamelCase : Optional[Any] = []
inorder(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 48 |
'''simple docstring'''
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__snake_case ="""\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
__snake_case ="""\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
__snake_case ="""
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
        'matthews_correlation': Matthews correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def a_ ( lowerCamelCase : str , lowerCamelCase : Union[str, Any] ):
return float((preds == labels).mean() )
def a_ ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict , lowerCamelCase : str="binary" ):
lowerCAmelCase = simple_accuracy(lowerCamelCase , lowerCamelCase )
lowerCAmelCase = float(fa_score(y_true=lowerCamelCase , y_pred=lowerCamelCase , average=lowerCamelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : List[Any] ):
lowerCAmelCase = {}
for id_pred, label in zip(lowerCamelCase , lowerCamelCase ):
lowerCAmelCase = f'''{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}'''
lowerCAmelCase = id_pred['prediction']
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
lowerCAmelCase = [(pred, label)]
lowerCAmelCase , lowerCAmelCase = [], []
for question, preds_labels in question_map.items():
lowerCAmelCase , lowerCAmelCase = zip(*lowerCamelCase )
lowerCAmelCase = fa_score(y_true=lowerCamelCase , y_pred=lowerCamelCase , average='macro' )
fas.append(lowerCamelCase )
lowerCAmelCase = int(sum(pred == label for pred, label in preds_labels ) == len(lowerCamelCase ) )
ems.append(lowerCamelCase )
lowerCAmelCase = float(sum(lowerCamelCase ) / len(lowerCamelCase ) )
lowerCAmelCase = sum(lowerCamelCase ) / len(lowerCamelCase )
lowerCAmelCase = float(fa_score(y_true=lowerCamelCase , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] ) -> Any:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(UpperCAmelCase__ , UpperCAmelCase__ )}
elif self.config_name == "cb":
return acc_and_fa(UpperCAmelCase__ , UpperCAmelCase__ , fa_avg='macro' )
elif self.config_name == "record":
lowerCAmelCase = [
{
'qas': [
{'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
for ref in references
]
}
]
lowerCAmelCase = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
return evaluate_record(UpperCAmelCase__ , UpperCAmelCase__ )[0]
elif self.config_name == "multirc":
return evaluate_multirc(UpperCAmelCase__ , UpperCAmelCase__ )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(UpperCAmelCase__ , UpperCAmelCase__ )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
| 4 | 0 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class _A :
def __init__( self : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int]=13 , __SCREAMING_SNAKE_CASE : Union[str, Any]=7 , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Optional[int]=99 , __SCREAMING_SNAKE_CASE : List[str]=32 , __SCREAMING_SNAKE_CASE : Dict=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=4 , __SCREAMING_SNAKE_CASE : Any=37 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : Dict=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : int=0.02 , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : str="None" , __SCREAMING_SNAKE_CASE : str=3 , __SCREAMING_SNAKE_CASE : Optional[int]=4 , __SCREAMING_SNAKE_CASE : List[str]=None , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = num_choices
__a = relative_attention
__a = position_biased_input
__a = pos_att_type
__a = scope
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length])
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__a = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=__SCREAMING_SNAKE_CASE , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
__a = TFDebertaVaModel(config=__SCREAMING_SNAKE_CASE)
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__a = [input_ids, input_mask]
__a = model(__SCREAMING_SNAKE_CASE)
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict):
'''simple docstring'''
__a = TFDebertaVaForMaskedLM(config=__SCREAMING_SNAKE_CASE)
__a = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
__a = self.num_labels
__a = TFDebertaVaForSequenceClassification(config=__SCREAMING_SNAKE_CASE)
__a = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
__a = self.num_labels
__a = TFDebertaVaForTokenClassification(config=__SCREAMING_SNAKE_CASE)
__a = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
__a = TFDebertaVaForQuestionAnswering(config=__SCREAMING_SNAKE_CASE)
__a = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
        ( __a , __a , __a , __a , __a , __a , __a ) = config_and_inputs
__a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class _A ( __UpperCAmelCase ,__UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : Dict = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase__ : Optional[int] = (
{
'''feature-extraction''': TFDebertaVaModel,
'''fill-mask''': TFDebertaVaForMaskedLM,
'''question-answering''': TFDebertaVaForQuestionAnswering,
'''text-classification''': TFDebertaVaForSequenceClassification,
'''token-classification''': TFDebertaVaForTokenClassification,
'''zero-shot''': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase__ : int = False
UpperCamelCase__ : Optional[int] = False
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = TFDebertaVaModelTester(self)
__a = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE)
@slow
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''')
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
@require_tf
class _A ( unittest.TestCase ):
@unittest.skip(reason='''Model not available yet''')
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
pass
@slow
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''')
__a = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
__a = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
__a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE)[0]
__a = tf.constant(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]])
tf.debugging.assert_near(output[:, 1:4, 1:4] , __SCREAMING_SNAKE_CASE , atol=1E-4)
| 49 |
'''simple docstring'''
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
| 4 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
_UpperCAmelCase : Any = logging.get_logger(__name__)
_UpperCAmelCase : Optional[Any] = {"""vocab_file""": """vocab.txt"""}
_UpperCAmelCase : Union[str, Any] = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
_UpperCAmelCase : Dict = {
"""YituTech/conv-bert-base""": 5_12,
"""YituTech/conv-bert-medium-small""": 5_12,
"""YituTech/conv-bert-small""": 5_12,
}
_UpperCAmelCase : List[str] = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = ConvBertTokenizer
def __init__( self : Optional[Any] , UpperCAmelCase : int=None , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Tuple="[UNK]" , UpperCAmelCase : Optional[int]="[SEP]" , UpperCAmelCase : List[Any]="[PAD]" , UpperCAmelCase : List[Any]="[CLS]" , UpperCAmelCase : Union[str, Any]="[MASK]" , UpperCAmelCase : Any=True , UpperCAmelCase : int=None , **UpperCAmelCase : Union[str, Any] , ) -> List[str]:
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , )
lowerCamelCase__ : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCAmelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCAmelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCAmelCase ) != tokenize_chinese_chars
):
lowerCamelCase__ : Union[str, Any] = getattr(UpperCAmelCase , normalizer_state.pop('type' ) )
lowerCamelCase__ : Dict = do_lower_case
lowerCamelCase__ : Dict = strip_accents
lowerCamelCase__ : Union[str, Any] = tokenize_chinese_chars
lowerCamelCase__ : Tuple = normalizer_class(**UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = do_lower_case
    def build_inputs_with_special_tokens( self : Union[str, Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self : Union[str, Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self : List[str] , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
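# Illustrative sketch (not part of the original file) of how the two special-token
# helpers above compose for a BERT-style sentence pair. The concrete ids below are
# hypothetical; real ids come from the trained vocab. [CLS] A [SEP] B [SEP] pairs
# with segment ids 0.. for the first segment and 1.. for the second.
ids_a = [7, 8, 9]  # hypothetical ids for sentence A
ids_b = [4, 5]  # hypothetical ids for sentence B
cls_id, sep_id = 101, 102  # assumed BERT-style special token ids
input_ids = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
assert len(input_ids) == len(token_type_ids)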
| 50 |
'''simple docstring'''
import os
__snake_case ={"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1_000}
def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    numerals = ""
    m_count = num // 1000
numerals += m_count * "M"
num %= 1000
    c_count = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        parsed = parse_roman_numerals(original)
        shortened = generate_roman_numerals(parsed)
        savings += len(original) - len(shortened)
    return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
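# Round-trip check for the two helpers above: "XLIX" parses to 49, and 49 regenerates
# as the minimal form "XLIX" (never the invalid shorthand "IL").
assert parse_roman_numerals("XLIX") == 49
assert generate_roman_numerals(49) == "XLIX"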
| 4 | 0 |
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """simple docstring"""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """simple docstring"""
    return "".join(chr(elem + 96) for elem in encoded)
def main() -> None:
"""simple docstring"""
    encoded = encode(input('''-> ''' ).strip().lower() )
    print('''Encoded: ''' , encoded )
    print('''Decoded:''' , decode(encoded ) )
if __name__ == "__main__":
main()
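# Worked example of the A1Z26 cipher implemented above: letters map to their
# 1-based alphabet positions, so "abc" -> [1, 2, 3] and back.
assert encode("abc") == [1, 2, 3]
assert decode([1, 2, 3]) == "abc"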
| 51 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__snake_case =random.Random()
if is_torch_available():
import torch
def a_ ( lowerCamelCase : Dict , lowerCamelCase : Dict=1.0 , lowerCamelCase : List[Any]=None , lowerCamelCase : Union[str, Any]=None ):
if rng is None:
lowerCAmelCase = global_rng
lowerCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase_ ( unittest.TestCase ):
def __init__( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str]=7 , UpperCAmelCase__ : int=4_0_0 , UpperCAmelCase__ : int=2_0_0_0 , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple=1_6_0_0_0 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Union[str, Any]=True , ) -> Any:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = min_seq_length
lowerCAmelCase = max_seq_length
lowerCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase = feature_size
lowerCAmelCase = padding_value
lowerCAmelCase = sampling_rate
lowerCAmelCase = return_attention_mask
lowerCAmelCase = do_normalize
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Union[str, Any]=False ) -> Optional[Any]:
def _flatten(UpperCAmelCase__ : int ):
return list(itertools.chain(*UpperCAmelCase__ ) )
if equal_length:
lowerCAmelCase = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCAmelCase = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase = [np.asarray(UpperCAmelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
lowerCamelCase : Dict = ASTFeatureExtractor
def __UpperCAmelCase ( self : str ) -> Optional[int]:
lowerCAmelCase = ASTFeatureExtractionTester(self )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase = [np.asarray(UpperCAmelCase__ ) for speech_input in speech_inputs]
# Test not batched input
lowerCAmelCase = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test batched
lowerCAmelCase = feat_extract(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
lowerCAmelCase = np.asarray(UpperCAmelCase__ )
lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
import torch
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = np.random.rand(1_0_0 ).astype(np.floataa )
lowerCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
    def _load_datasamples( self : int , num_samples : int ) -> Tuple:
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('id' ).select(range(num_samples ) )[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
@require_torch
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
# fmt: off
lowerCAmelCase = torch.tensor(
[-0.9_894, -1.2_776, -0.9_066, -1.2_776, -0.9_349, -1.2_609, -1.0_386, -1.2_776,
-1.1_561, -1.2_776, -1.2_052, -1.2_723, -1.2_190, -1.2_132, -1.2_776, -1.1_133,
-1.1_953, -1.1_343, -1.1_584, -1.2_203, -1.1_770, -1.2_474, -1.2_381, -1.1_936,
-0.9_270, -0.8_317, -0.8_049, -0.7_706, -0.7_565, -0.7_869] )
# fmt: on
lowerCAmelCase = self._load_datasamples(1 )
lowerCAmelCase = ASTFeatureExtractor()
lowerCAmelCase = feature_extractor(UpperCAmelCase__ , return_tensors='pt' ).input_values
        self.assertEqual(input_values.shape , (1, 1_0_2_4, 1_2_8) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , UpperCAmelCase__ , atol=1E-4 ) )
| 4 | 0 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
UpperCamelCase : Dict = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
UpperCamelCase : Any = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(_lowerCAmelCase ):
os.makedirs(_lowerCAmelCase )
UpperCamelCase : str = model.state_dict()
def to_tf_var_name(_lowerCAmelCase ):
for patt, repl in iter(_lowerCAmelCase ):
UpperCamelCase : str = name.replace(_lowerCAmelCase , _lowerCAmelCase )
return F"""bert/{name}"""
def create_tf_var(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
UpperCamelCase : str = tf.dtypes.as_dtype(tensor.dtype )
UpperCamelCase : str = tf.get_variable(dtype=_lowerCAmelCase , shape=tensor.shape , name=_lowerCAmelCase , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(_lowerCAmelCase )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
UpperCamelCase : str = to_tf_var_name(_lowerCAmelCase )
UpperCamelCase : Union[str, Any] = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
UpperCamelCase : Any = torch_tensor.T
UpperCamelCase : Any = create_tf_var(tensor=_lowerCAmelCase , name=_lowerCAmelCase , session=_lowerCAmelCase )
tf.keras.backend.set_value(_lowerCAmelCase , _lowerCAmelCase )
UpperCamelCase : List[str] = session.run(_lowerCAmelCase )
print(F"""Successfully created {tf_name}: {np.allclose(_lowerCAmelCase , _lowerCAmelCase )}""" )
UpperCamelCase : Optional[Any] = tf.train.Saver(tf.trainable_variables() )
saver.save(_lowerCAmelCase , os.path.join(_lowerCAmelCase , model_name.replace("-" , "_" ) + ".ckpt" ) )
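# Standalone illustration (not part of the conversion script) of the kind of name the
# var_map replacement loop above produces; only a subset of the patterns is replayed
# here, so treat it as a sketch rather than the full mapping.
def _demo_to_tf_name(name):
    # apply the same replace-in-order idea as to_tf_var_name above
    for patt, repl in (("layer.", "layer_"), (".", "/"), ("weight", "kernel")):
        name = name.replace(patt, repl)
    return f"bert/{name}"

assert _demo_to_tf_name("layer.0.attention.self.query.weight") == "bert/layer_0/attention/self/query/kernel"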
def A_ ( _lowerCAmelCase=None ) -> Dict:
UpperCamelCase : str = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" , type=_lowerCAmelCase , default=_lowerCAmelCase , required=_lowerCAmelCase , help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="Directory in which to save tensorflow model" )
UpperCamelCase : List[Any] = parser.parse_args(_lowerCAmelCase )
UpperCamelCase : Dict = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=_lowerCAmelCase , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 52 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : str ) -> List[Any]:
lowerCAmelCase = torch.nn.Linear(1_0 , 1_0 )
lowerCAmelCase = torch.optim.SGD(model.parameters() , 0.1 )
lowerCAmelCase = Accelerator()
lowerCAmelCase = accelerator.prepare(UpperCAmelCase__ )
try:
pickle.loads(pickle.dumps(UpperCAmelCase__ ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 4 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Optional[Any] ={
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any =['''MobileBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict =[
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[Any] =[
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
a__ : Tuple =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
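# Structural sketch (simplified assumption, not the real implementation) of what the
# _LazyModule pattern above provides: importing the package stays cheap because each
# submodule is only imported on first attribute access.
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [names]} into {name: submodule}
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        # resolve the attribute by importing its submodule on demand
        module = importlib.import_module(f"{self.__name__}.{self._class_to_module[attr]}")
        return getattr(module, attr)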
| 53 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
__snake_case ={
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
__snake_case ={"""facebook/blenderbot-3B""": 128}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : List[Any] = VOCAB_FILES_NAMES
lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = ['''input_ids''', '''attention_mask''']
lowerCamelCase : List[Any] = BlenderbotTokenizer
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : str="replace" , UpperCAmelCase__ : Dict="<s>" , UpperCAmelCase__ : Tuple="</s>" , UpperCAmelCase__ : Optional[Any]="</s>" , UpperCAmelCase__ : Any="<s>" , UpperCAmelCase__ : List[str]="<unk>" , UpperCAmelCase__ : int="<pad>" , UpperCAmelCase__ : Union[str, Any]="<mask>" , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Union[str, Any]=True , **UpperCAmelCase__ : Optional[int] , ) -> int:
super().__init__(
UpperCAmelCase__ , UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , errors=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ , **UpperCAmelCase__ , )
lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , UpperCAmelCase__ ) != add_prefix_space:
lowerCAmelCase = getattr(UpperCAmelCase__ , pre_tok_state.pop('type' ) )
lowerCAmelCase = add_prefix_space
lowerCAmelCase = pre_tok_class(**UpperCAmelCase__ )
lowerCAmelCase = add_prefix_space
lowerCAmelCase = 'post_processor'
lowerCAmelCase = getattr(self.backend_tokenizer , UpperCAmelCase__ , UpperCAmelCase__ )
if tokenizer_component_instance:
lowerCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase = tuple(state['sep'] )
if "cls" in state:
lowerCAmelCase = tuple(state['cls'] )
lowerCAmelCase = False
if state.get('add_prefix_space' , UpperCAmelCase__ ) != add_prefix_space:
lowerCAmelCase = add_prefix_space
lowerCAmelCase = True
if state.get('trim_offsets' , UpperCAmelCase__ ) != trim_offsets:
lowerCAmelCase = trim_offsets
lowerCAmelCase = True
if changes_to_apply:
lowerCAmelCase = getattr(UpperCAmelCase__ , state.pop('type' ) )
lowerCAmelCase = component_class(**UpperCAmelCase__ )
setattr(self.backend_tokenizer , UpperCAmelCase__ , UpperCAmelCase__ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : Optional[Any] ) -> Tuple:
lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else value
lowerCAmelCase = value
def __UpperCAmelCase ( self : Optional[Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : List[str] ) -> BatchEncoding:
lowerCAmelCase = kwargs.get('is_split_into_words' , UpperCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[str] , *UpperCAmelCase__ : str , **UpperCAmelCase__ : List[str] ) -> BatchEncoding:
lowerCAmelCase = kwargs.get('is_split_into_words' , UpperCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
lowerCAmelCase = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> Any:
return token_ids_a + [self.eos_token_id]
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : "Conversation" ) -> List[int]:
lowerCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(UpperCAmelCase__ )
lowerCAmelCase = ' '.join(UpperCAmelCase__ )
lowerCAmelCase = self.encode(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > self.model_max_length:
lowerCAmelCase = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
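# Illustration of build_inputs_with_special_tokens above: Blenderbot only appends the
# eos token to the ids (no bos/cls prefix), since inputs feed a generation model. The
# id value below is an assumption for the sketch, not taken from the real vocab.
eos_token_id = 2  # assumed eos id
assert [10, 11, 12] + [eos_token_id] == [10, 11, 12, 2]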
| 4 | 0 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : str ) -> Optional[int]:
with open(UpperCAmelCase__ , encoding="utf-8" ) as input_file:
__SCREAMING_SNAKE_CASE = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
__SCREAMING_SNAKE_CASE = input_file.read()
__SCREAMING_SNAKE_CASE = regexp.search(UpperCAmelCase__ )
return match
def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : str ) -> Union[str, Any]:
with open(UpperCAmelCase__ , encoding="utf-8" ) as input_file:
__SCREAMING_SNAKE_CASE = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
__SCREAMING_SNAKE_CASE = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
__SCREAMING_SNAKE_CASE = regexp.finditer(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def UpperCAmelCase_ ( self : Dict ) -> Dict:
__SCREAMING_SNAKE_CASE = Path("./datasets" )
__SCREAMING_SNAKE_CASE = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(UpperCAmelCase__ ) ):
raise AssertionError(F"""open(...) must use utf-8 encoding in {dataset}""" )
def UpperCAmelCase_ ( self : str ) -> int:
__SCREAMING_SNAKE_CASE = Path("./datasets" )
__SCREAMING_SNAKE_CASE = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(UpperCAmelCase__ ) ):
raise AssertionError(F"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
| 54 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to waiting_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
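# Hand check for the demo below: all four jobs arrive at t=0 with bursts [2, 5, 3, 7],
# so shortest-job-first runs P1, P3, P2, P4, giving waiting times [0, 5, 2, 10] and
# turnaround times [2, 10, 5, 17] (turnaround = burst + waiting).
_expected_waiting = [0, 5, 2, 10]
assert [b + w for b, w in zip([2, 5, 3, 7], _expected_waiting)] == [2, 10, 5, 17]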
if __name__ == "__main__":
print("""[TEST CASE 01]""")
__snake_case =4
__snake_case =[2, 5, 3, 7]
__snake_case =[0, 0, 0, 0]
__snake_case =calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__snake_case =calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
F'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(F'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(F'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 4 | 0 |
'''simple docstring'''
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theory in order to find if there is a root between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
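# Convergence note for bisection above: each iteration halves the bracket, so the
# [0, 6] call needs about ceil(log2(6 / 0.01)) = 10 halvings to reach the 0.01
# tolerance while closing in on sqrt(10) ~= 3.162.
import math

assert math.ceil(math.log2(6 / 0.01)) == 10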
| 55 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : Optional[int] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Tuple ) -> Any:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_euler' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self : List[str] ) -> Dict:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_euler' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_dpmpp_2m' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=1_5 , output_type='np' , use_karras_sigmas=UpperCAmelCase__ , )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array(
[0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 4 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
a : Dict = logging.get_logger(__name__)
class a ( _lowerCamelCase ):
def __init__( self : List[str] , *lowercase_ : Union[str, Any] , **lowercase_ : Tuple ):
warnings.warn(
'''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use OwlViTImageProcessor instead.''' , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
| 56 |
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def a_ ( lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any]=0 ):
# Format the message.
if name is None:
lowerCAmelCase = None
else:
lowerCAmelCase = '.' * max(0 , spaces - 2 ) + '# {:' + str(50 - spaces ) + 's}'
lowerCAmelCase = fmt.format(lowerCamelCase )
# Print and recurse (if needed).
if isinstance(lowerCamelCase , lowerCamelCase ):
if msg is not None:
print(lowerCamelCase )
for k in val.keys():
recursive_print(lowerCamelCase , val[k] , spaces + 2 )
elif isinstance(lowerCamelCase , torch.Tensor ):
print(lowerCamelCase , ':' , val.size() )
else:
print(lowerCamelCase , ':' , lowerCamelCase )
def a_ ( lowerCamelCase : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Dict , lowerCamelCase : Tuple ):
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
lowerCAmelCase = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
lowerCAmelCase = (num_heads, hidden_size, num_splits) + input_shape[1:]
lowerCAmelCase = param.view(*lowerCamelCase )
lowerCAmelCase = param.transpose(0 , 2 )
lowerCAmelCase = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
lowerCAmelCase = (num_heads, num_splits, hidden_size) + input_shape[1:]
lowerCAmelCase = param.view(*lowerCamelCase )
lowerCAmelCase = param.transpose(0 , 1 ).contiguous()
lowerCAmelCase = param.view(*lowerCamelCase )
return param
def a_ ( lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : str ):
# The converted output model.
lowerCAmelCase = {}
# old versions did not store training args
lowerCAmelCase = input_state_dict.get('args' , lowerCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
lowerCAmelCase = ds_args.padded_vocab_size
lowerCAmelCase = ds_args.max_position_embeddings
lowerCAmelCase = ds_args.hidden_size
lowerCAmelCase = ds_args.num_layers
lowerCAmelCase = ds_args.num_attention_heads
lowerCAmelCase = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
lowerCAmelCase = config.n_head
# The hidden_size per head.
lowerCAmelCase = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
lowerCAmelCase = input_state_dict['checkpoint_version']
else:
lowerCAmelCase = 0.0
# The model.
lowerCAmelCase = input_state_dict['model']
# The language model.
lowerCAmelCase = model['language_model']
# The embeddings.
lowerCAmelCase = lm['embedding']
# The word embeddings.
lowerCAmelCase = embeddings['word_embeddings']['weight']
# Truncate the embedding table to vocab_size rows.
lowerCAmelCase = word_embeddings[: config.vocab_size, :]
lowerCAmelCase = word_embeddings
# The position embeddings.
lowerCAmelCase = embeddings['position_embeddings']['weight']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
lowerCAmelCase = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
lowerCAmelCase = pos_embeddings
# The transformer.
lowerCAmelCase = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder']
# The regex to extract layer names.
lowerCAmelCase = re.compile(R'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' )
# The simple map of names for "automated" rules.
lowerCAmelCase = {
'attention.dense': '.attn.c_proj.',
'self_attention.dense': '.attn.c_proj.',
'mlp.dense_h_to_4h': '.mlp.c_fc.',
'mlp.dense_4h_to_h': '.mlp.c_proj.',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
lowerCAmelCase = layer_re.match(lowerCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
lowerCAmelCase = int(m.group(1 ) )
# The name of the operation.
lowerCAmelCase = m.group(2 )
# Is it a weight or a bias?
lowerCAmelCase = m.group(3 )
# The name of the layer.
lowerCAmelCase = f'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith('layernorm' ):
lowerCAmelCase = 'ln_1' if op_name.startswith('input' ) else 'ln_2'
lowerCAmelCase = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
lowerCAmelCase = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , lowerCamelCase , lowerCamelCase )
lowerCAmelCase = causal_mask
# Insert a "dummy" tensor for masked_bias.
lowerCAmelCase = torch.tensor(-1e4 , dtype=torch.floataa )
lowerCAmelCase = masked_bias
lowerCAmelCase = fix_query_key_value_ordering(lowerCamelCase , lowerCamelCase , 3 , lowerCamelCase , lowerCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
lowerCAmelCase = out_val.transpose(0 , 1 ).contiguous()
# Store.
lowerCAmelCase = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
lowerCAmelCase = fix_query_key_value_ordering(lowerCamelCase , lowerCamelCase , 3 , lowerCamelCase , lowerCamelCase )
# Store. No change of shape.
lowerCAmelCase = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
lowerCAmelCase = megatron_to_transformers[op_name]
lowerCAmelCase = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
lowerCAmelCase = megatron_to_transformers[op_name]
lowerCAmelCase = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
lowerCAmelCase = transformer['final_layernorm.weight']
lowerCAmelCase = transformer['final_layernorm.bias']
# For LM head, transformers' wants the matrix to weight embeddings.
lowerCAmelCase = word_embeddings
# It should be done!
return output_state_dict
def a_ ( ):
# Create the argument parser.
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('--print-checkpoint-structure' , action='store_true' )
parser.add_argument(
'path_to_checkpoint' , type=lowerCamelCase , help='Path to the checkpoint file (.zip archive or direct .pt file)' , )
parser.add_argument(
'--config_file' , default='' , type=lowerCamelCase , help='An optional config json file describing the pre-trained model.' , )
lowerCAmelCase = parser.parse_args()
# Extract the basename.
lowerCAmelCase = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith('.zip' ):
with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint:
with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict:
lowerCAmelCase = torch.load(lowerCamelCase , map_location='cpu' )
else:
lowerCAmelCase = torch.load(args.path_to_checkpoint , map_location='cpu' )
lowerCAmelCase = input_state_dict.get('args' , lowerCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
lowerCAmelCase = 'gelu_fast'
elif ds_args.openai_gelu:
lowerCAmelCase = 'gelu_new'
else:
lowerCAmelCase = 'gelu'
else:
# in the very early days this used to be "gelu_new"
lowerCAmelCase = 'gelu_new'
# Spell out all parameters in case the defaults change.
lowerCAmelCase = GPTaConfig(
vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=lowerCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='cls_index' , summary_use_proj=lowerCamelCase , summary_activation=lowerCamelCase , summary_proj_to_labels=lowerCamelCase , summary_first_dropout=0.1 , scale_attn_weights=lowerCamelCase , use_cache=lowerCamelCase , bos_token_id=50256 , eos_token_id=50256 , )
else:
lowerCAmelCase = GPTaConfig.from_json_file(args.config_file )
lowerCAmelCase = ['GPT2LMHeadModel']
# Convert.
print('Converting' )
lowerCAmelCase = convert_megatron_checkpoint(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(lowerCamelCase , lowerCamelCase )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
lowerCAmelCase = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
lowerCAmelCase = 'gpt2'
elif tokenizer_type == "PretrainedFromHF":
lowerCAmelCase = ds_args.tokenizer_name_or_path
else:
raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
lowerCAmelCase = 'gpt2'
lowerCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase )
lowerCAmelCase = type(lowerCamelCase ).__name__
lowerCAmelCase = tokenizer_class
# Store the config to file.
print('Saving config' )
config.save_pretrained(lowerCamelCase )
# Save tokenizer based on args
print(f'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(lowerCamelCase )
# Store the state_dict to file.
lowerCAmelCase = os.path.join(lowerCamelCase , 'pytorch_model.bin' )
print(f'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(lowerCamelCase , lowerCamelCase )
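# Shape sketch (illustration only) for fix_query_key_value_ordering above with
# checkpoint_version >= 2.0: a fused QKV weight of shape [heads * 3 * head_dim, hidden]
# is viewed as [heads, 3, head_dim, hidden], the first two axes are swapped, and the
# result is flattened back so Q, K and V become three contiguous row blocks.
_heads, _head_dim, _hidden = 2, 4, 8
_param = torch.zeros(_heads * 3 * _head_dim, _hidden)
_out = _param.view(_heads, 3, _head_dim, _hidden).transpose(0, 1).contiguous().view(3 * _heads * _head_dim, _hidden)
assert _out.shape == _param.shape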
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 4 | 0 |
"""simple docstring"""
def _lowerCamelCase ( word ):
    '''simple docstring'''
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 57 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class UpperCAmelCase_ :
    def __init__( self : Optional[int] , row : int , column : int , default_value : float = 0 ) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column )] for r in range(row )]
def __str__( self : List[str] ) -> str:
lowerCAmelCase = F'''Matrix consist of {self.row} rows and {self.column} columns\n'''
# Make string identifier
lowerCAmelCase = 0
for row_vector in self.array:
for obj in row_vector:
lowerCAmelCase = max(UpperCAmelCase__ , len(str(UpperCAmelCase__ ) ) )
lowerCAmelCase = F'''%{max_element_length}s'''
# Make string and return
def single_line(UpperCAmelCase__ : list[float] ) -> str:
nonlocal string_format_identifier
lowerCAmelCase = '['
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(UpperCAmelCase__ ) for row_vector in self.array )
return s
def __repr__( self : List[str] ) -> str:
return str(self )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : tuple[int, int] ) -> bool:
if not (isinstance(UpperCAmelCase__ , (list, tuple) ) and len(UpperCAmelCase__ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Any , UpperCAmelCase__ : tuple[int, int] ) -> Any:
assert self.validate_indicies(UpperCAmelCase__ )
return self.array[loc[0]][loc[1]]
def __setitem__( self : Dict , UpperCAmelCase__ : tuple[int, int] , UpperCAmelCase__ : float ) -> None:
assert self.validate_indicies(UpperCAmelCase__ )
lowerCAmelCase = value
def __add__( self : Any , UpperCAmelCase__ : Matrix ) -> Matrix:
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert self.row == another.row and self.column == another.column
# Add
lowerCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = self[r, c] + another[r, c]
return result
def __neg__( self : int ) -> Matrix:
lowerCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = -self[r, c]
return result
def __sub__( self : str , UpperCAmelCase__ : Matrix ) -> Matrix:
return self + (-another)
def __mul__( self : str , UpperCAmelCase__ : int | float | Matrix ) -> Matrix:
if isinstance(UpperCAmelCase__ , (int, float) ): # Scalar multiplication
lowerCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = self[r, c] * another
return result
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): # Matrix multiplication
assert self.column == another.row
lowerCAmelCase = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
lowerCAmelCase = F'''Unsupported type given for another ({type(UpperCAmelCase__ )})'''
raise TypeError(UpperCAmelCase__ )
    def transpose( self : Optional[Any] ) -> Matrix:
        result = Matrix(self.column , self.row )
        for r in range(self.row ):
            for c in range(self.column ):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison( self : List[str] , u : Matrix , v : Matrix ) -> Any:
        assert isinstance(u , Matrix ) and isinstance(v , Matrix )
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def a_ ( ):
# a^(-1)
        ainv = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(f'''a^(-1) is {ainv}''' )
        # u, v
        u = Matrix(3 , 1 , 0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3 , 1 , 0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f'''u is {u}''' )
        print(f'''v is {v}''' )
        print(f'''uv^T is {u * v.transpose()}''' )
        # Sherman Morrison
        print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}''' )
def a_ ( ):
import doctest
doctest.testmod()
testa()
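# Numeric sanity check of the Sherman-Morrison identity the class relies on:
# (A + u v^T)^(-1) = A^(-1) - A^(-1) u v^T A^(-1) / (1 + v^T A^(-1) u). With A = I,
# u = (1, 2, -3)^T and v = (4, -2, 5)^T as in the demo, 1 + v^T u = 1 - 15 = -14,
# which is the numerator_factor sherman_morrison computes.
assert 1 + (4 * 1 + (-2) * 2 + 5 * (-3)) == -14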
| 4 | 0 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
lowercase_ = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
lowercase_ = """
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
lowercase_ = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
'''simple docstring'''
def snake_case_( self ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
def snake_case_( self , A , A ) -> List[str]:
_SCREAMING_SNAKE_CASE = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
_SCREAMING_SNAKE_CASE = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
_SCREAMING_SNAKE_CASE = evaluate(dataset=A , predictions=A )
return score
| 58 |
'''simple docstring'''
class UpperCAmelCase_ :
    def __init__( self : List[str] , array : list[int] ) -> None:
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum( self : Optional[Any] , start : int , end : int ) -> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum( self : int , target_sum : int ) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
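# Worked example for the class above: array [1, 2, 3] yields prefix sums [1, 3, 6];
# get_sum(1, 2) is prefix[2] - prefix[0] = 5, and contains_sum(5) holds for the same
# reason (5 appears as a difference of two prefix sums, 6 - 1).
_prefix = [1, 3, 6]
assert _prefix[2] - _prefix[0] == 5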
| 4 | 0 |
from __future__ import annotations
__lowerCamelCase = list[list[int]]
# assigning initial values to the grid
__lowerCamelCase = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__lowerCamelCase = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid ):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
for row in grid:
for cell in row:
            print(cell , end=" " )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
__lowerCamelCase = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 59 |
'''simple docstring'''
def a_ ( lowerCamelCase : Optional[Any] ):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def a_ ( graph : dict[int, list[int]] ):
    id_ = 0
    n = len(graph )  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at , parent , bridges , id_ ):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to , at , bridges , id_ )
                low[at] = min(low[at] , low[to] )
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at) )
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at] , low[to] )

    bridges = []
    for i in range(n ):
        if not visited[i]:
            dfs(i , -1 , bridges , id_ )
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
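# Independent cross-check (illustration, separate from the DFS above): an edge is a
# bridge iff removing it disconnects its endpoints. For graph index 0, (3, 4) is a
# bridge while (0, 1) is not, because 1 stays reachable through 2.
from collections import deque

_g0 = {0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7]}

def _reachable(graph, start, banned_edge):
    # breadth-first search that skips the single banned edge
    seen, queue = {start}, deque([start])
    while queue:
        u = queue.popleft()
        for v in graph[u]:
            if {u, v} == set(banned_edge) or v in seen:
                continue
            seen.add(v)
            queue.append(v)
    return seen

assert 4 not in _reachable(_g0, 0, (3, 4))
assert 1 in _reachable(_g0, 0, (0, 1))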
| 4 | 0 |
"""simple docstring"""
def or_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(1 ) != 0 )


def test_or_gate() -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
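

# A hedged companion sketch (not part of the original file): the same
# tuple-count idiom gives an AND gate by requiring that no input is 0.
def and_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) == 0)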
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 60 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith('module.encoder'):
            key = key.replace('module.encoder', 'glpn.encoder')
        if key.startswith('module.decoder'):
            key = key.replace('module.decoder', 'decoder.stages')
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed') + len('patch_embed')]
            key = key.replace(f'patch_embed{idx}', f'patch_embeddings.{int(idx)-1}')
        if "norm" in key:
            key = key.replace('norm', 'layer_norm')
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('glpn.encoder.layer_norm') + len('glpn.encoder.layer_norm')]
            key = key.replace(f'layer_norm{idx}', f'layer_norm.{int(idx)-1}')
        if "layer_norm1" in key:
            key = key.replace('layer_norm1', 'layer_norm_1')
        if "layer_norm2" in key:
            key = key.replace('layer_norm2', 'layer_norm_2')
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('block') + len('block')]
            key = key.replace(f'block{idx}', f'block.{int(idx)-1}')
        if "attn.q" in key:
            key = key.replace('attn.q', 'attention.self.query')
        if "attn.proj" in key:
            key = key.replace('attn.proj', 'attention.output.dense')
        if "attn" in key:
            key = key.replace('attn', 'attention.self')
        if "fc1" in key:
            key = key.replace('fc1', 'dense1')
        if "fc2" in key:
            key = key.replace('fc2', 'dense2')
        if "linear_pred" in key:
            key = key.replace('linear_pred', 'classifier')
        if "linear_fuse" in key:
            key = key.replace('linear_fuse.conv', 'linear_fuse')
            key = key.replace('linear_fuse.bn', 'batch_norm')
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c') + len('linear_c')]
            key = key.replace(f'linear_c{idx}', f'linear_c.{int(idx)-1}')
        if "bot_conv" in key:
            key = key.replace('bot_conv', '0.convolution')
        if "skip_conv1" in key:
            key = key.replace('skip_conv1', '1.convolution')
        if "skip_conv2" in key:
            key = key.replace('skip_conv2', '2.convolution')
        if "fusion1" in key:
            key = key.replace('fusion1', '1.fusion')
        if "fusion2" in key:
            key = key.replace('fusion2', '2.fusion')
        if "fusion3" in key:
            key = key.replace('fusion3', '3.fusion')
        if "fusion" in key and "conv" in key:
            key = key.replace('conv', 'convolutional_layer')
        if key.startswith('module.last_layer_depth'):
            key = key.replace('module.last_layer_depth', 'head.head')
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f'glpn.encoder.block.{i}.{j}.attention.self.kv.weight')
            kv_bias = state_dict.pop(f'glpn.encoder.block.{i}.{j}.attention.self.kv.bias')
            # next, add keys and values (in that order) to the state dict
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.key.weight'] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.key.bias'] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.value.weight'] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.value.bias'] = kv_bias[config.hidden_sizes[i] :]
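# Hedged illustration (toy values, not part of the conversion script): the
# fused kv matrix is split row-wise, keys first and values second, e.g.:
#   import torch
#   kv_weight = torch.arange(16.0).reshape(4, 4)  # hidden size 2 -> 4 rows
#   key_weight, value_weight = kv_weight[:2, :], kv_weight[2:, :]
#   assert key_weight.shape == value_weight.shape == (2, 4)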
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values
    logger.info('Converting model...')
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]])
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]])
        else:
            raise ValueError(f'Unknown model name: {model_name}')
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print('Looks ok!')
    # finally, push to hub if required
    if push_to_hub:
        logger.info('Pushing model and image processor to the hub...')
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add model', use_temp_dir=True)
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add image processor', use_temp_dir=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
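    # Hedged usage sketch (script name and paths are placeholders, not from the file):
    #   python convert_glpn_to_pytorch.py \
    #       --checkpoint_path ./glpn_kitti.pth \
    #       --pytorch_dump_folder_path ./glpn-kitti-hf \
    #       --model_name glpn-kitti --push_to_hub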
| 4 | 0 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy").images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

            generator = generator.manual_seed(0)
            new_image = pipe.dual_guided(
                prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy").images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 61 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
@slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained('xlm-roberta-base')
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['last_hidden_state'].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
@slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained('xlm-roberta-large')
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['last_hidden_state'].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
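
# Hedged note (not part of the original file): ``@slow`` tests are skipped by
# default in the transformers suite; enable them with the RUN_SLOW flag, e.g.
#   RUN_SLOW=1 pytest tests/models/xlm_roberta -k integration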
| 4 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=['politics', 'health'])
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier('Who are you voting for in 2020?', candidate_labels='politics')
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        # No kwarg
        outputs = classifier('Who are you voting for in 2020?', ['politics'])
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        outputs = classifier('Who are you voting for in 2020?', candidate_labels=['politics'])
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        outputs = classifier('Who are you voting for in 2020?', candidate_labels='politics, public health')
        self.assertEqual(
            outputs, {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'])), 1.0)

        outputs = classifier('Who are you voting for in 2020?', candidate_labels=['politics', 'public health'])
        self.assertEqual(
            outputs, {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'])), 1.0)

        outputs = classifier(
            'Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template='This text is about {}')
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(['I am happy'], ['positive', 'negative'])
        self.assertEqual(
            outputs,
            [
                {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(['I am happy', 'I am sad'], ['positive', 'negative'])
        self.assertEqual(
            outputs,
            [
                {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier('', candidate_labels='politics')
        with self.assertRaises(TypeError):
            classifier(None, candidate_labels='politics')
        with self.assertRaises(ValueError):
            classifier('Who are you voting for in 2020?', candidate_labels='')
        with self.assertRaises(TypeError):
            classifier('Who are you voting for in 2020?', candidate_labels=None)
        with self.assertRaises(ValueError):
            classifier(
                'Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template='Not formatting template')
        with self.assertRaises(AttributeError):
            classifier(
                'Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template=None)

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {'ENTAIL': 0, 'NON-ENTAIL': 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
@require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='pt')
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            'Who are you voting for in 2020?' * 100, candidate_labels=['politics', 'public health', 'science'])

    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='pt')
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(
            nested_simplify(outputs),
            {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['science', 'public health', 'politics'],
                'scores': [0.333, 0.333, 0.333],
            },
        )
@require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='tf')
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(
            nested_simplify(outputs),
            {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['science', 'public health', 'politics'],
                'scores': [0.333, 0.333, 0.333],
            },
        )
@slow
@require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='pt')
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(
            nested_simplify(outputs),
            {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['politics', 'public health', 'science'],
                'scores': [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=True , )
self.assertEqual(
            nested_simplify(outputs) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='tf')
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(
            nested_simplify(outputs),
            {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['politics', 'public health', 'science'],
                'scores': [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=True , )
self.assertEqual(
            nested_simplify(outputs) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} , )
| 62 |
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output['input_ids'] = tokenizer(example['content'], truncation=False)['input_ids']
    output['ratio_char_token'] = len(example['content']) / len(output['input_ids'])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
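# Hedged usage sketch (script name and flag spellings assumed from the
# dataclass fields referenced above, e.g. tokenizer_dir and dataset_name):
#   python pretokenizing.py --tokenizer_dir codeparrot/codeparrot \
#       --dataset_name codeparrot/codeparrot-clean-train --num_workers 8 \
#       --tokenized_data_repo tokenized-codeparrot-train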
| 4 | 0 |
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class UNet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" )
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" )
    def test_from_pretrained_accelerate_wont_change_results(self):
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1, model_accelerate.config.in_channels, model_accelerate.config.sample_size, model_accelerate.config.sample_size, generator=torch.manual_seed(0))
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False)
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)
    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0))
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
@slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"
@slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
| 63 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to use SortishSampler or not.'})
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'})
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
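
# Hedged usage sketch (values are illustrative, not from this file):
#   args = Seq2SeqTrainingArguments(
#       output_dir="out", predict_with_generate=True,
#       generation_max_length=128, generation_num_beams=4)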
| 4 | 0 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : list ):
"""simple docstring"""
def merge(snake_case__ : list , snake_case__ : list ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(snake_case__ ) <= 1:
return collection
_snake_case : str = len(snake_case__ ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=''',''')
| 64 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}")

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith('.*'):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split('.*.')
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
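# Hedged illustration (toy keys, not from the script): the three ignore-key
# forms behave like
#   should_ignore('encoder.model.0.conv', ['encoder.*'])         -> True
#   should_ignore('quantizer.vq.layers.3.embed', ['vq.*.embed'])  -> True
#   should_ignore('decoder.lstm', ['lstm'])                       -> True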
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []
    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split('.*.')
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith('embed') and name.endswith('embed_avg'):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split('.')[-2]
                    mapped_key = mapped_key.replace('*', layer_index)
                if "weight_g" in name:
                    weight_type = 'weight_g'
                elif "weight_v" in name:
                    weight_type = 'weight_v'
                elif "weight_ih_l0" in name:
                    weight_type = 'weight_ih_l0'
                elif "weight_hh_l0" in name:
                    weight_type = 'weight_hh_l0'
                elif "bias_ih_l0" in name:
                    weight_type = 'bias_ih_l0'
                elif "bias_hh_l0" in name:
                    weight_type = 'bias_hh_l0'
                elif "weight_ih_l1" in name:
                    weight_type = 'weight_ih_l1'
                elif "weight_hh_l1" in name:
                    weight_type = 'weight_hh_l1'
                elif "bias_ih_l1" in name:
                    weight_type = 'bias_ih_l1'
                elif "bias_hh_l1" in name:
                    weight_type = 'bias_hh_l1'
                elif "bias" in name:
                    weight_type = 'bias'
                elif "weight" in name:
                    weight_type = 'weight'
                elif "running_mean" in name:
                    weight_type = 'running_mean'
                elif "running_var" in name:
                    weight_type = 'running_var'
                elif "num_batches_tracked" in name:
                    weight_type = 'num_batches_tracked'
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = 'time_group_norm'
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint['best_state']
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print('Pushing to the hub...')
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 4 | 0 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(model_type, generator_name_or_path, question_encoder_name_or_path, dest_dir, config_name_or_path=None, generator_tokenizer_name_or_path=None, question_encoder_tokenizer_name_or_path=None):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config)
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
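
# Hedged invocation sketch (model ids are illustrative, script name assumed):
#   python consolidate_rag_checkpoint.py --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-checkpoint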
| 65 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__snake_case =logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
def __init__( self : Dict , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : List[str] ) -> None:
warnings.warn(
'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use FlavaImageProcessor instead.' , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 4 | 0 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    def __init__(self, replacement: str = "▁", add_prefix_space: bool = True, unk_token: Union[str, AddedToken] = "<unk>", eos_token: Union[str, AddedToken] = "</s>", pad_token: Union[str, AddedToken] = "<pad>"):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ])
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ])
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}", special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])])

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(self, files: Union[str, List[str]], vocab_size: int = 8_000, show_progress: bool = True):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress)

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 8_000, show_progress: bool = True):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress)

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
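
# Hedged usage sketch (toy corpus, not part of the original module):
#   tokenizer = SentencePieceUnigramTokenizer()
#   tokenizer.train_from_iterator(iter(["some text", "more text"]), vocab_size=1000)
#   print(tokenizer.encode("some text").ids)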
| 66 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
__snake_case ={
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/blenderbot_small-90M""": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Fast BlenderbotSmall tokenizer, backed by a byte-level BPE model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(self, vocab_file=None, merges_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>",
                 eos_token="<|endoftext|>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            ByteLevelBPETokenizer(vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space,
                                  trim_offsets=trim_offsets),
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        # Blenderbot does not use token type ids; every position is segment 0.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
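

# Hedged usage sketch (downloads from the HF hub; output is illustrative):
# tok = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
# tok("sam is a great name.").input_ids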
| 4 | 0 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the fetched dataset into features and target.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load the California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 67 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4,
                 decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1,
                 attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2,
                 scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2,
                 max_target_positions=1024, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
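

# Hedged usage sketch: a scaled-down config; the keyword values are
# illustrative, and every omitted field falls back to the defaults above.
# config = Speech2Text2Config(vocab_size=1000, d_model=128, decoder_layers=2)
# config.hidden_size  # -> 128, resolved through attribute_map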
| 4 | 0 |
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    """Banker's algorithm: decide whether resource requests leave the system
    in a safe state (deadlock avoidance)."""

    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        # column-wise totals of the resources currently allocated
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        # free resources = total claim vector - currently allocated
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        # per-process outstanding need = maximum claim - currently allocated
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
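    # Demo run on the sample tables above (hedged; it mirrors the original
    # module's docstring example). Prints the tables, then a safe execution order.
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)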
| 68 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    """Patience sort: deal elements onto piles, then lazily merge the piles."""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # use a heap-based merge to merge the stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
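

# Quick illustration (hedged): the sort is in place and the list is returned.
# patience_sort([1, 9, 5, 21, 17, 6])  # -> [1, 5, 6, 9, 17, 21]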
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
| 4 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # insert in descending order so the list reads ascending from the head
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
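    # Expected output of the demo above (hedged illustration for the sample data):
    # -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10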
| 69 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__snake_case ="""\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__snake_case ="""\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__snake_case ="""
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
| 4 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos):
    """Normalize the input into a batch: a list of videos, each a list of frames."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class VideoImageProcessor(BaseImageProcessor):
    # NOTE: the original class name was obfuscated in this source; "VideoImageProcessor"
    # is a neutral placeholder for a Vivit/VideoMAE-style video frame processor.
    """Video frame processor: resize -> center crop -> rescale/offset -> normalize."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None,
                 resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True,
                 crop_size: Dict[str, int] = None, do_rescale: bool = True,
                 rescale_factor: Union[int, float] = 1 / 255, offset: bool = True, do_normalize: bool = True,
                 image_mean: Optional[Union[float, List[float]]] = None,
                 image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int],
               resample: PILImageResampling = PILImageResampling.BILINEAR,
               data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int],
                    data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], offset: bool = True,
                data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]],
                  data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None,
                          resample: PILImageResampling = None, do_center_crop: bool = None,
                          crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None,
                          offset: bool = None, do_normalize: bool = None,
                          image_mean: Optional[Union[float, List[float]]] = None,
                          image_std: Optional[Union[float, List[float]]] = None,
                          data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None,
                   resample: PILImageResampling = None, do_center_crop: bool = None,
                   crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None,
                   offset: bool = None, do_normalize: bool = None,
                   image_mean: Optional[Union[float, List[float]]] = None,
                   image_std: Optional[Union[float, List[float]]] = None,
                   return_tensors: Optional[Union[str, TensorType]] = None,
                   data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop,
                    crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset,
                    do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
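

# Hedged usage sketch (the class name above is a reconstruction; shapes are
# illustrative). BaseImageProcessor.__call__ dispatches to preprocess():
# processor = VideoImageProcessor()
# frames = [np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8) for _ in range(8)]
# batch = processor(frames, return_tensors="np")
# batch["pixel_values"].shape  # -> (1, 8, 3, 224, 224)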
| 70 |
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__snake_case ="""\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
__snake_case ="""\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
__snake_case ="""
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """Computes per-question macro-F1, answer-level F1, and exact match for MultiRC."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 4 | 0 |
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR: 1 when both inputs are equal, else 0 (the complement of XOR)."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 71 |
'''simple docstring'''
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
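# How the one-liner reproduces itself: the lambda receives its own source
# template as a string; `quine % quine` substitutes that string into the %r
# placeholder (repr() restores the quotes) while %% collapses to a literal %,
# so the printed text equals the program's source.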
| 4 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
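# Lazy-loading note (hedged illustration): with sys.modules patched above, a line like
#   from transformers.models.time_series_transformer import TimeSeriesTransformerConfig
# resolves without importing the torch-backed modeling file until one of the
# model classes is actually accessed.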
| 72 |
'''simple docstring'''
import os
__snake_case ={"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1_000}
def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman numeral string to an integer (subtractive forms allowed)."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generate the minimal Roman numeral for num."""
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Count the characters saved by rewriting each numeral in minimal form."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as file1:
        lines = file1.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings
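

# Round-trip illustration (hedged): "XIIII" parses to 14, whose minimal form
# "XIV" saves two characters.
# parse_roman_numerals("XIIII")  # -> 14
# generate_roman_numerals(14)    # -> "XIV"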
if __name__ == "__main__":
print(F'''{solution() = }''')
| 4 | 0 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
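# Hedged CLI sketch: HfArgumentParser turns every field of
# TensorFlowBenchmarkArguments into a command-line flag. An illustrative
# invocation (script name and values are assumptions, not from this file):
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128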
| 73 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1,
                 padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 4 | 0 |
"""simple docstring"""
# Function to print upper half of diamond (pyramid)
def _snake_case ( snake_case__ : Dict ):
for i in range(0 , snake_case__ ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def _snake_case ( snake_case__ : Tuple ):
for i in range(snake_case__ , 0 , -1 ):
for _ in range(snake_case__ , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def _snake_case ( snake_case__ : Dict ):
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(snake_case__ ) # upper half
reverse_floyd(snake_case__ ) # lower half
if __name__ == "__main__":
print(r'''| /\ | |- | |- |--| |\ /| |-''')
print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
_lowercase = 1
while K:
_lowercase = int(input('''enter the number and , and see the magic : '''))
print()
pretty_print(user_number)
_lowercase = int(input('''press 0 to exit... and 1 to continue...'''))
print('''Good Bye...''') | 74 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 4 | 0 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCamelCase_ ={'''input_ids''': [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase_, model_name="google/bigbird-pegasus-large-arxiv", revision="ba85d0851d708441f91440d509690f1ab6353415", )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer(self):
        src_text = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(src_text).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
| 75 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
__snake_case ={"""facebook/blenderbot-3B""": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>",
                 eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>",
                 mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
else:
# Generated responses should contain them already.
inputs.append(UpperCAmelCase__ )
lowerCAmelCase = ' '.join(UpperCAmelCase__ )
lowerCAmelCase = self.encode(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > self.model_max_length:
lowerCAmelCase = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
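
# A minimal usage sketch (assumes network access to the checkpoint named above):
#   tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#   ids = tokenizer("Hello world")["input_ids"]  # ends with the EOS token id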
| 4 | 0 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available

if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    """End-to-end smoke test for Seq2SeqTrainer with a tiny encoder-decoder model."""

    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum(int(pred_str[i] == label_str[i]) for i in range(len(pred_str))) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
| 76 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """
    Calculate the waiting time of each process (non-preemptive shortest job first).
    """
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to the burst time of each process.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0

    # When processes are not completed,
    # a process whose arrival time has passed
    # and that has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1

    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time is burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
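

def _sjf_demo() -> None:
    # Illustrative check (not part of the original file): with staggered arrivals
    # [0, 1, 2] and bursts [3, 1, 2], the first arrival runs to completion, after
    # which the shortest ready job is always picked next, giving waits [0, 2, 2].
    assert calculate_waitingtime([0, 1, 2], [3, 1, 2], 3) == [0, 2, 2]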
if __name__ == "__main__":
    print("[TEST CASE 01]")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
| 4 | 0 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
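

# A minimal usage sketch outside the test harness (standard `diffusers` API):
#   scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
#   scheduler.set_timesteps(50)  # evenly spaced inference timesteps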
| 77 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
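

# Usage sketch (assumes the k-diffusion extra is installed): any sampler name
# exposed by k-diffusion can be selected the same way, e.g.
#   pipe.set_scheduler("sample_euler_ancestral")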
| 4 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    """Builds image-processor configs and expected output shapes for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
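
# Worked example of the resize rule above (shortest_edge=18): a 30x60 (h x w)
# image maps to (18, 36), since the shorter side is scaled to 18 and the longer
# side keeps the aspect ratio.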
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """API tests for DetrImageProcessor."""

    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
def UpperCAmelCase__ ( self :List[Any] ) -> Union[str, Any]:
pass
def UpperCAmelCase__ ( self :Tuple ) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
UpperCAmelCase = image_processing(lowercase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self :int ) -> Optional[int]:
# Initialize image_processing
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase = image_processing(lowercase_ , return_tensors='pt' ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self :Dict ) -> int:
# Initialize image_processing
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase = image_processing(lowercase_ , return_tensors='pt' ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCAmelCase__ ( self :List[str] ) -> List[str]:
# prepare image and target
UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
UpperCAmelCase = json.loads(f.read() )
UpperCAmelCase = {'image_id': 3_97_69, 'annotations': target}
# encode them
UpperCAmelCase = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
UpperCAmelCase = image_processing(images=lowercase_ , annotations=lowercase_ , return_tensors='pt' )
# verify pixel values
UpperCAmelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , lowercase_ )
UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowercase_ , atol=1E-4 ) )
# verify area
UpperCAmelCase = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowercase_ ) )
# verify boxes
UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowercase_ )
UpperCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowercase_ , atol=1E-3 ) )
# verify image_id
UpperCAmelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowercase_ ) )
# verify is_crowd
UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowercase_ ) )
# verify class_labels
UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowercase_ ) )
# verify orig_size
UpperCAmelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowercase_ ) )
# verify size
UpperCAmelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowercase_ ) )
@slow
def UpperCAmelCase__ ( self :Optional[Any] ) -> Union[str, Any]:
# prepare image, target and masks_path
UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
UpperCAmelCase = json.loads(f.read() )
UpperCAmelCase = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
UpperCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
UpperCAmelCase = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
UpperCAmelCase = image_processing(images=lowercase_ , annotations=lowercase_ , masks_path=lowercase_ , return_tensors='pt' )
# verify pixel values
UpperCAmelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , lowercase_ )
UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowercase_ , atol=1E-4 ) )
# verify area
UpperCAmelCase = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowercase_ ) )
# verify boxes
UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowercase_ )
UpperCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowercase_ , atol=1E-3 ) )
# verify image_id
UpperCAmelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowercase_ ) )
# verify is_crowd
UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowercase_ ) )
# verify class_labels
UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowercase_ ) )
# verify masks
UpperCAmelCase = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , lowercase_ )
# verify orig_size
UpperCAmelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowercase_ ) )
# verify size
UpperCAmelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowercase_ ) )
| 78 |
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)


def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
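

def _qkv_reorder_shape_check() -> None:
    # Illustrative sanity check (not part of the original script; sizes chosen
    # arbitrarily): reordering only permutes rows, so the overall
    # [num_splits * num_heads * head_dim, hidden] shape is preserved.
    dummy = torch.randn(3 * 4 * 8, 32)
    assert fix_query_key_value_ordering(dummy, 2.0, 3, 4, 8).shape == dummy.shape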
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
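

def _layer_regex_demo() -> None:
    # Illustrative check (not in the original script): how the regex splits a
    # Megatron parameter name into (layer index, op name, weight-or-bias).
    m = re.match(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)", "layers.11.self_attention.dense.weight")
    assert m is not None and m.groups() == ("11", "self_attention.dense", "weight")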
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 4 | 0 |
'''simple docstring'''
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """
    Calculate the waiting time of each process (preemptive shortest remaining time first).
    """
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """
    Turnaround time is the sum of burst time and waiting time.
    """
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """
    Print the average waiting and turnaround times.
    """
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
| 79 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consists of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
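
    # For reference, the identity implemented above (self plays the role of
    # A^(-1), so the update avoids re-inverting the full matrix):
    #   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)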
# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
| 4 | 0 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file: str) -> List[str]:
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [line.strip() for line in lines]


class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer: whitespace-split, one residue per token."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False):
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
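

# Usage sketch (the vocab path is illustrative): ESM treats each whitespace-
# separated residue as one token.
#   tokenizer = EsmTokenizer(vocab_file="vocab.txt")
#   tokenizer.tokenize("M K T A Y I")  # -> ["M", "K", "T", "A", "Y", "I"]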
| 80 |
'''simple docstring'''
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of array[start..end] (inclusive) in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if any contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
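

def _prefix_sum_demo() -> None:
    # A small usage sketch (illustrative values, not part of the original file):
    ps = PrefixSum([1, 2, 3, 4])
    assert ps.get_sum(0, 3) == 10  # whole array
    assert ps.get_sum(1, 2) == 5  # 2 + 3
    assert ps.contains_sum(7)  # 3 + 4
    assert not ps.contains_sum(100)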
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase_ : Optional[Any] = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Tuple = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[int] = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 81 |
'''simple docstring'''
def get_demo_graph(index: int) -> dict[int, list[int]]:
    """Return one of four small undirected demo graphs (adjacency lists)."""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    """Return the bridges of an undirected graph via Tarjan's low-link values."""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at: int, parent: int, bridges: list[tuple[int, int]], id_: int) -> None:
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
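
    # Hand-checked demonstration (graph indices refer to get_demo_graph above):
    # graph 3 is two-edge-connected, so it has no bridges; in graph 0 removing
    # the edge (2, 3) separates vertices 3 and 4 from the rest, so it is a bridge.
    assert compute_bridges(get_demo_graph(3)) == []
    assert (2, 3) in compute_bridges(get_demo_graph(0))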
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
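

if __name__ == "__main__":
    # Convenience runner (not part of the original test module); the
    # backend-specific tests above are skipped when torch/tf/flax are absent.
    unittest.main()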
'''Convert GLPN checkpoints from the original repository to the Hugging Face format.'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
__snake_case =parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
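
# Example invocation (the checkpoint file name is illustrative):
#     python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_kitti.pth \
#         --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti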
'''Manim scene illustrating how an empty model skeleton is loaded into memory.

Note: the layout direction constants (UP/RIGHT/DOWN) below are reconstructed
from the surrounding geometry and may differ from the original scene.
'''
from manim import *


class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        step_1.move_to([2, 2, 0])

        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
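
# Rendering sketch (assumes the manim CLI is installed; the file name here is
# illustrative):
#     manim -pql stage_1.py Stage1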
'''Integration tests checking XLM-RoBERTa hidden states against reference values.'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
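
# The checks above are gated behind @slow. Assuming the standard transformers
# test layout (an assumption about the surrounding repo), they run with:
#     RUN_SLOW=1 pytest tests/models/xlm_roberta -k Integration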
"""simple docstring"""
def _snake_case ( lowercase__ : int = 5_0 ) -> int:
'''simple docstring'''
lowerCAmelCase_ :Any = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
'''Pre-tokenize a dataset with multiprocessing and push the result to the Hub.'''
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
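
# Example invocation (dataset/tokenizer/repo ids are placeholders):
#     python pretokenizing.py --tokenizer_dir codeparrot/codeparrot \
#         --dataset_name codeparrot/codeparrot-clean-train \
#         --tokenized_data_repo tokenized-codeparrot-train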
'''Convert a Music Spectrogram Diffusion (T5X) checkpoint into a diffusers
SpectrogramDiffusionPipeline.'''
import argparse
import os

import jax as jnp  # note: the whole jax package is aliased as jnp; jnp.tree_util is used below
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder


MODEL = "base_with_context"
def load_notes_encoder(weights, model):
    # NOTE: the left-hand module paths follow the T5-style blocks used by
    # diffusers' SpectrogramNotesEncoder; they are reconstructed here and should
    # be double-checked against the installed diffusers version.
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_continuous_encoder(weights, model):
    # Same caveat as above: the module paths on the left are reconstructed.
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]

        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_decoder(weights, model):
    # Same caveat: the module paths on the left are reconstructed from the
    # known T5FilmDecoder layout (layer[0] self-attention, layer[1]
    # cross-attention, layer[2] FiLM feed-forward) and should be verified.
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )
        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))
    return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu")

    continuous_encoder = SpectrogramContEncoder(input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu")

    decoder = T5FilmDecoder(input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate)

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan)

    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=F"{MODEL}/checkpoint_500000",
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
_SCREAMING_SNAKE_CASE : Any = parser.parse_args()
main(args)
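
# Example invocation (paths are illustrative):
#     python convert_music_spectrogram_to_diffusers.py \
#         --checkpoint_path ./base_with_context/checkpoint_500000 \
#         --output_path ./spectrogram-diffusion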
'''Training arguments for sequence-to-sequence models.'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
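
# Minimal usage sketch (illustrative values; model and Trainer wiring omitted):
#
#     args = Seq2SeqTrainingArguments(
#         output_dir="out",
#         predict_with_generate=True,
#         generation_max_length=128,
#         generation_num_beams=4,
#     )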
"""simple docstring"""
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = scheduler_timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
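

if __name__ == "__main__":
    # Convenience runner (not part of the original test module); pytest
    # discovery also picks these tests up without it.
    import unittest

    unittest.main()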
'''Convert EnCodec checkpoints from the original repository to the Hugging Face format.'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
MAPPING_48K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__snake_case =parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
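
# Example invocation (the checkpoint file name is illustrative):
#     python convert_encodec_checkpoint_to_pytorch.py --model encodec_24khz \
#         --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#         --pytorch_dump_folder_path ./encodec-24khz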
import collections
import os
from typing import List, Optional, Tuple

from transformers.utils import is_jieba_available, requires_backends


if is_jieba_available():
    import jieba

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Load a vocabulary file into an ordered token-to-index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
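
# Note (illustrative, not from the original file): this wordpiece variant is a
# greedy longest-match-first scan. With vocab {"ab", "a", "b"}, "ab" tokenizes
# to ["ab"], never ["a", "b"]; characters with no match become the unk token.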
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def _tokenize(self, text):
        """Tokenize a string with jieba, then refine the pieces with the wordpiece tokenizer."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)
    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # restore the special space and newline tokens before writing the file
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
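# Hedged usage sketch (not part of the original file; downloading the checkpoint
# and having the jieba backend installed are both assumed):
#
#     tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
#     ids = tokenizer("今天天气真好!")["input_ids"]
#     print(tokenizer.convert_ids_to_tokens(ids))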
| 87 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 4 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
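# Hedged illustration (not from the original file): with pad_token_id = 1, the
# derived attention mask simply flags non-pad positions, e.g.
#
#     np.where(np.array([[5, 7, 1]]) != 1, 1, 0)  ->  array([[1, 1, 0]])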
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 88 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
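# Hedged usage sketch (not part of the original file):
#
#     tok = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
#     ids = tok.convert_tokens_to_ids(tok.tokenize("sam"))
#     print(tok.build_inputs_with_special_tokens(ids))  # [bos] + ids + [eos], per the method above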
| 4 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
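# Hedged illustration (not from the original file): a fairseq key such as
# "encoder.layers.3.fc1.weight" matches the "fc1" entry in MAPPING, and the
# "*" in "encoder.layers.*.feed_forward.intermediate_dense" is replaced by the
# layer index ("3") during conversion.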
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43

            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)

            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
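# Hedged CLI sketch (not part of the original file; the script name and paths
# are hypothetical placeholders):
#
#     python convert_unispeech_checkpoint.py \
#         --checkpoint_path /path/to/unispeech.pt \
#         --pytorch_dump_folder_path ./unispeech-hf \
#         --dict_path /path/to/dict.ltr.txt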
| 89 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4,
        decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2,
        scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
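# Hedged usage sketch (not part of the original file):
#
#     config = Speech2Text2Config(vocab_size=10000, d_model=256)
#     assert config.hidden_size == config.d_model  # resolved via attribute_map above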
| 4 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16],
        kernel_size=7, mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5,
        layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
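# Hedged usage sketch (not part of the original file):
#
#     config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5])
#     assert config.hidden_size == 64 * 2 ** 3  # channel dim after the last of the 4 stages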
| 90 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    """Sort a list in place using patience sort and return it."""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted))
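# Hedged walk-through (not part of the original file): for [3, 1, 2] the loop
# builds stacks [3, 1] (1 is placed on the 3-stack since 1 < 3) and [2], and
# the heap merge of the reversed stacks yields [1, 2, 3].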
| 4 | 0 |
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class Version:
    """Dataset version MAJOR.MINOR.PATCH, e.g. "1.2.3"."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str
def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple extracted from the version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Return the version string built from the (major, minor, patch) tuple."""
    return ".".join(str(v) for v in version_tuple)
| 91 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__snake_case ="""\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__snake_case ="""\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__snake_case ="""
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
| 4 | 0 |
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Apply the electrical impedance formula Z = sqrt(R**2 + X**2) and compute
    whichever of resistance, reactance, or impedance is passed as 0.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
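# Hedged worked example (not part of the original file): with R = 3 and X = 4,
# electrical_impedance(3, 4, 0) returns {"impedance": 5.0}, since
# Z = sqrt(3**2 + 4**2) = 5.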
| 92 |
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__snake_case ="""\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
__snake_case ="""\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
__snake_case ="""
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """
    Computes F1 score and Exact Match for MultiRC predictions.
    """
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 4 | 0 |
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[min, max])
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)
    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _clip_box(tensor, box_size):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
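# --- illustrative sketch (not part of the original file) ---
# Minimal demo of _scale_box / _clip_box above. Boxes are (x0, y0, x1, y1);
# x coordinates use scale_x (column 1 of scale_yx), y coordinates use
# scale_y (column 0); the result is clipped in place to the image bounds.
# All values below are made up for illustration.
if __name__ == "__main__":
    _demo_boxes = torch.tensor([[10.0, 20.0, 650.0, 480.0]])
    _demo_scales = torch.tensor([[0.5, 0.5]])  # (scale_y, scale_x)
    _demo_boxes = _scale_box(_demo_boxes, _demo_scales)
    _clip_box(_demo_boxes, (300, 300))  # clamp to a 300x300 canvas
    print(_demo_boxes)  # tensor([[  5.,  10., 300., 240.]])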
| 93 |
'''simple docstring'''
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
| 4 | 0 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]

    def __init__(self, feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=False, num_mel_bins=80, hop_length=16, win_length=64, win_function="hann_window", frame_signal_scale=1.0, fmin=80, fmax=7600, mel_floor=1e-10, reduction_factor=2, return_attention_mask=True, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins, min_frequency=self.fmin, max_frequency=self.fmax, sampling_rate=self.sampling_rate, norm="slaney", mel_scale="slaney")
        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers", FutureWarning)
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers", FutureWarning)

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values

    def _extract_mel_features(self, one_waveform):
        log_mel_spec = spectrogram(
            one_waveform, window=self.window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, mel_filters=self.mel_filters, mel_floor=self.mel_floor, log_mel="log10")
        return log_mel_spec.T

    def __call__(self, audio=None, audio_target=None, padding=False, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None, sampling_rate=None, **kwargs):
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")
        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs)
        else:
            inputs = None
        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs)
            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs

    def _process_audio(self, speech, is_target=False, padding=False, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None, **kwargs):
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            speech = [np.asarray(speech, dtype=np.float32) for speech in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)
        # always return batch
        if not is_batched:
            speech = [speech]
        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size
        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs)
        self.feature_size = feature_size_hack
        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)
        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value)
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs

    def to_dict(self):
        output = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]
        return output
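# --- illustrative sketch (not part of the original file) ---
# Tiny numpy demo of zero_mean_unit_var_norm above: the statistics come from
# the non-padded prefix of each vector only (per the attention mask), and the
# padded tail is overwritten with padding_value. The inputs are made up.
if __name__ == "__main__":
    _vec = np.array([1.0, 2.0, 3.0, 0.0, 0.0], dtype=np.float32)
    _mask = np.array([[1, 1, 1, 0, 0]], dtype=np.int32)
    _normed = SpeechT5FeatureExtractor.zero_mean_unit_var_norm([_vec], _mask)
    print(_normed[0])  # ~[-1.2247, 0.0, 1.2247, 0.0, 0.0]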
| 94 |
'''simple docstring'''
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        niceroman = generate_roman_numerals(num)
        savings += len(original) - len(niceroman)
    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
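    # --- illustrative example (not part of the original solution) ---
    # round-trip of the two converters: a deliberately verbose numeral
    # shrinks to its minimal form, and the character saving (here 9 - 4 = 5)
    # is exactly what solution() accumulates per line of the input file.
    print(parse_roman_numerals("XXXXVIIII"))  # 49, written with 9 characters
    print(generate_roman_numerals(49))  # XLIX, only 4 characters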
| 4 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
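# --- illustrative sketch (not part of the original file) ---
# A minimal model of what _LazyModule does: attribute access triggers the
# real import, so importing the package stays cheap until a symbol such as
# EfficientNetModel is actually touched. The class below is hypothetical
# and only illustrates the deferral mechanism.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported class name to the submodule that defines it
        self._class_to_module = {cls: mod for mod, classes in import_structure.items() for cls in classes}

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value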
| 95 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
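# --- illustrative usage (not part of the original file) ---
# floats_list builds a nested Python list, not an ndarray: a (2, 3) shape
# yields two inner lists of three floats drawn from [0, scale).
if __name__ == "__main__":
    _sample = floats_list((2, 3), scale=2.0)
    print(len(_sample), len(_sample[0]))  # 2 3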
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 4 | 0 |
"""simple docstring"""
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
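# --- illustrative example (not part of the original file) ---
# Two 4-ohm resistors: the parallel combination halves the resistance
# (1 / (1/4 + 1/4) = 2), while the series combination adds them.
assert resistor_parallel([4.0, 4.0]) == 2.0
assert resistor_series([4.0, 4.0]) == 8.0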
if __name__ == "__main__":
import doctest
doctest.testmod() | 96 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
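# --- illustrative sketch (not part of the original test) ---
# The property under test, without the Accelerate wrapper: a plain torch
# optimizer must survive a pickle round trip, and the accelerator-prepared
# optimizer above is expected to preserve that behavior.
def _demo_plain_optimizer_pickles():
    model = torch.nn.Linear(10, 10)
    optimizer = torch.optim.SGD(model.parameters(), 0.1)
    restored = pickle.loads(pickle.dumps(optimizer))
    assert restored.defaults["lr"] == 0.1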
| 4 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''apply_ocr''' ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
UpperCamelCase__ :Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
self.assertIsInstance(encoding.words , UpperCamelCase_ )
self.assertIsInstance(encoding.boxes , UpperCamelCase_ )
# Test batched
UpperCamelCase__ :int = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
UpperCamelCase__ :Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
UpperCamelCase__ :Tuple = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
UpperCamelCase__ :Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
UpperCamelCase__ :Dict = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = LayoutLMvaImageProcessor()
from datasets import load_dataset
UpperCamelCase__ :int = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
UpperCamelCase__ :int = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
UpperCamelCase__ :str = image_processing(UpperCamelCase_ , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
UpperCamelCase__ :Tuple = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
UpperCamelCase__ :Dict = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCamelCase_ )
self.assertListEqual(encoding.boxes , UpperCamelCase_ )
# with apply_OCR = False
UpperCamelCase__ :List[str] = LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase_ )
UpperCamelCase__ :Tuple = image_processing(UpperCamelCase_ , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) | 97 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
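# --- illustrative sketch (not part of the original file) ---
# The truncation rule at the end of _build_conversation_input_ids above:
# when the encoded conversation exceeds model_max_length, only the most
# recent tokens are kept, i.e. history is dropped from the left.
if __name__ == "__main__":
    _ids = list(range(10))
    _max_len = 4
    print(_ids[-_max_len:])  # [6, 7, 8, 9]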
| 4 | 0 |
"""simple docstring"""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
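# --- illustrative example (not part of the original file) ---
# normalize_box rescales pixel coordinates onto the 0-1000 grid that
# LayoutLM-style models expect, e.g. for a box covering the right half
# of a 200x100-pixel page:
assert normalize_box([100, 0, 200, 100], 200, 100) == [500, 0, 1000, 1000]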
def apply_tesseract(image, lang, tesseract_config):
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMvaImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_rescale=True, rescale_value=1 / 255, do_normalize=True, image_mean=None, image_std=None, apply_ocr=True, ocr_lang=None, tesseract_config="", **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, size=None, resample=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, apply_ocr=None, ocr_lang=None, tesseract_config=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 98 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
    print("[TEST CASE 01]")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
| 4 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
lowercase : Any = """
Human: <<task>>
Assistant: """
lowercase : List[str] = """huggingface-tools/default-prompts"""
lowercase : str = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
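# --- illustrative sketch (not part of the original file) ---
# The repo-id heuristic above: any string containing whitespace is treated
# as a literal prompt and returned unchanged; a space-free string is treated
# as a Hub repo id and fetched with cached_file.
if __name__ == "__main__":
    assert re.search("\\s", "my-user/my-prompts") is None  # looks like a repo id
    assert re.search("\\s", "Answer the question: <<task>>")  # literal prompt text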
| 99 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=15, output_type="np", use_karras_sigmas=True,
        )
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 4 | 0 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 100 |
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def a_ ( lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any]=0 ):
# Format the message.
if name is None:
lowerCAmelCase = None
else:
lowerCAmelCase = '.' * max(0 , spaces - 2 ) + '# {:' + str(50 - spaces ) + 's}'
lowerCAmelCase = fmt.format(lowerCamelCase )
# Print and recurse (if needed).
if isinstance(lowerCamelCase , lowerCamelCase ):
if msg is not None:
print(lowerCamelCase )
for k in val.keys():
recursive_print(lowerCamelCase , val[k] , spaces + 2 )
elif isinstance(lowerCamelCase , torch.Tensor ):
print(lowerCamelCase , ':' , val.size() )
else:
print(lowerCamelCase , ':' , lowerCamelCase )
def a_ ( lowerCamelCase : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Dict , lowerCamelCase : Tuple ):
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
lowerCAmelCase = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
lowerCAmelCase = (num_heads, hidden_size, num_splits) + input_shape[1:]
lowerCAmelCase = param.view(*lowerCamelCase )
lowerCAmelCase = param.transpose(0 , 2 )
lowerCAmelCase = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
lowerCAmelCase = (num_heads, num_splits, hidden_size) + input_shape[1:]
lowerCAmelCase = param.view(*lowerCamelCase )
lowerCAmelCase = param.transpose(0 , 1 ).contiguous()
lowerCAmelCase = param.view(*lowerCamelCase )
return param
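# Worked example (illustrative numbers, not taken from any real checkpoint):
# with num_heads=2, num_splits=3 (Q/K/V) and hidden_size=4, a checkpoint-2.0
# weight of shape (2*3*4, D) is viewed as (2, 3, 4, D), the head and split axes
# are swapped to give (3, 2, 4, D), and the tensor is flattened back to (24, D)
# so that all query rows come first, then all key rows, then all value rows.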
def a_ ( lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : str ):
# The converted output model.
lowerCAmelCase = {}
# old versions did not store training args
lowerCAmelCase = input_state_dict.get('args' , lowerCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
lowerCAmelCase = ds_args.padded_vocab_size
lowerCAmelCase = ds_args.max_position_embeddings
lowerCAmelCase = ds_args.hidden_size
lowerCAmelCase = ds_args.num_layers
lowerCAmelCase = ds_args.num_attention_heads
lowerCAmelCase = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
lowerCAmelCase = config.n_head
# The hidden_size per head.
lowerCAmelCase = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
lowerCAmelCase = input_state_dict['checkpoint_version']
else:
lowerCAmelCase = 0.0
# The model.
lowerCAmelCase = input_state_dict['model']
# The language model.
lowerCAmelCase = model['language_model']
# The embeddings.
lowerCAmelCase = lm['embedding']
# The word embeddings.
lowerCAmelCase = embeddings['word_embeddings']['weight']
# Truncate the embedding table to vocab_size rows.
lowerCAmelCase = word_embeddings[: config.vocab_size, :]
lowerCAmelCase = word_embeddings
# The position embeddings.
lowerCAmelCase = embeddings['position_embeddings']['weight']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
lowerCAmelCase = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
lowerCAmelCase = pos_embeddings
# The transformer.
lowerCAmelCase = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder']
# The regex to extract layer names.
lowerCAmelCase = re.compile(R'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' )
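# e.g. "layers.3.mlp.dense_h_to_4h.weight" is split by this regex into the layer
# index ("3"), the operation ("mlp.dense_h_to_4h") and the parameter kind ("weight").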
# The simple map of names for "automated" rules.
lowerCAmelCase = {
'attention.dense': '.attn.c_proj.',
'self_attention.dense': '.attn.c_proj.',
'mlp.dense_h_to_4h': '.mlp.c_fc.',
'mlp.dense_4h_to_h': '.mlp.c_proj.',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
lowerCAmelCase = layer_re.match(lowerCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
lowerCAmelCase = int(m.group(1 ) )
# The name of the operation.
lowerCAmelCase = m.group(2 )
# Is it a weight or a bias?
lowerCAmelCase = m.group(3 )
# The name of the layer.
lowerCAmelCase = f'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith('layernorm' ):
lowerCAmelCase = 'ln_1' if op_name.startswith('input' ) else 'ln_2'
lowerCAmelCase = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a 1 x 1 x n_positions x n_positions lower-triangular causal mask.
lowerCAmelCase = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , lowerCamelCase , lowerCamelCase )
lowerCAmelCase = causal_mask
# Insert a "dummy" tensor for masked_bias.
lowerCAmelCase = torch.tensor(-1e4 , dtype=torch.floataa )
lowerCAmelCase = masked_bias
lowerCAmelCase = fix_query_key_value_ordering(lowerCamelCase , lowerCamelCase , 3 , lowerCamelCase , lowerCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
lowerCAmelCase = out_val.transpose(0 , 1 ).contiguous()
# Store.
lowerCAmelCase = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
lowerCAmelCase = fix_query_key_value_ordering(lowerCamelCase , lowerCamelCase , 3 , lowerCamelCase , lowerCamelCase )
# Store. No change of shape.
lowerCAmelCase = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
lowerCAmelCase = megatron_to_transformers[op_name]
lowerCAmelCase = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
lowerCAmelCase = megatron_to_transformers[op_name]
lowerCAmelCase = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
lowerCAmelCase = transformer['final_layernorm.weight']
lowerCAmelCase = transformer['final_layernorm.bias']
# For the LM head, transformers expects the weight matrix to be tied to the word embeddings.
lowerCAmelCase = word_embeddings
# It should be done!
return output_state_dict
def a_ ( ):
# Create the argument parser.
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('--print-checkpoint-structure' , action='store_true' )
parser.add_argument(
'path_to_checkpoint' , type=lowerCamelCase , help='Path to the checkpoint file (.zip archive or direct .pt file)' , )
parser.add_argument(
'--config_file' , default='' , type=lowerCamelCase , help='An optional config json file describing the pre-trained model.' , )
lowerCAmelCase = parser.parse_args()
# Extract the basename.
lowerCAmelCase = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith('.zip' ):
with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint:
with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict:
lowerCAmelCase = torch.load(lowerCamelCase , map_location='cpu' )
else:
lowerCAmelCase = torch.load(args.path_to_checkpoint , map_location='cpu' )
lowerCAmelCase = input_state_dict.get('args' , lowerCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
lowerCAmelCase = 'gelu_fast'
elif ds_args.openai_gelu:
lowerCAmelCase = 'gelu_new'
else:
lowerCAmelCase = 'gelu'
else:
# in the very early days this used to be "gelu_new"
lowerCAmelCase = 'gelu_new'
# Spell out all parameters in case the defaults change.
lowerCAmelCase = GPTaConfig(
vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=lowerCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='cls_index' , summary_use_proj=lowerCamelCase , summary_activation=lowerCamelCase , summary_proj_to_labels=lowerCamelCase , summary_first_dropout=0.1 , scale_attn_weights=lowerCamelCase , use_cache=lowerCamelCase , bos_token_id=50256 , eos_token_id=50256 , )
else:
lowerCAmelCase = GPTaConfig.from_json_file(args.config_file )
lowerCAmelCase = ['GPT2LMHeadModel']
# Convert.
print('Converting' )
lowerCAmelCase = convert_megatron_checkpoint(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(lowerCamelCase , lowerCamelCase )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
lowerCAmelCase = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
lowerCAmelCase = 'gpt2'
elif tokenizer_type == "PretrainedFromHF":
lowerCAmelCase = ds_args.tokenizer_name_or_path
else:
raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
lowerCAmelCase = 'gpt2'
lowerCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase )
lowerCAmelCase = type(lowerCamelCase ).__name__
lowerCAmelCase = tokenizer_class
# Store the config to file.
print('Saving config' )
config.save_pretrained(lowerCamelCase )
# Save tokenizer based on args
print(f'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(lowerCamelCase )
# Store the state_dict to file.
lowerCAmelCase = os.path.join(lowerCamelCase , 'pytorch_model.bin' )
print(f'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(lowerCamelCase , lowerCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 4 | 0 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class lowercase :
def __init__( self):
lowercase = ''''''
lowercase = ''''''
lowercase = []
lowercase = 0
lowercase = 2_5_6
lowercase = 0
lowercase = 0
lowercase = 0
lowercase = 0
def A__ ( self ,A__):
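# This performs a histogram-equalization style stretch: sk accumulates the
# normalized histogram (i.e. the CDF), each grey level is remapped to
# round((L - 1) * CDF), and last_list becomes the lookup table applied
# pixel-by-pixel before the output image is written.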
lowercase = cva.imread(A__ ,0)
lowercase = copy.deepcopy(self.img)
lowercase , lowercase , lowercase = plt.hist(self.img.ravel() ,2_5_6 ,[0, 2_5_6] ,label='''x''')
lowercase = np.sum(A__)
for i in range(len(A__)):
lowercase = x[i] / self.k
self.sk += prk
lowercase = (self.L - 1) * self.sk
if self.rem != 0:
lowercase = int(last % last)
lowercase = int(last + 1 if self.rem >= 0.5 else last)
self.last_list.append(A__)
lowercase = int(np.ma.count(self.img) / self.img[1].size)
lowercase = self.img[1].size
for i in range(self.number_of_cols):
for j in range(self.number_of_rows):
lowercase = self.img[j][i]
if num != self.last_list[num]:
lowercase = self.last_list[num]
cva.imwrite('''output_data/output.jpg''' ,self.img)
def A__ ( self):
plt.hist(self.img.ravel() ,2_5_6 ,[0, 2_5_6])
def A__ ( self):
cva.imshow('''Output-Image''' ,self.img)
cva.imshow('''Input-Image''' ,self.original_image)
cva.waitKey(5_0_0_0)
cva.destroyAllWindows()
if __name__ == "__main__":
lowercase__ :List[Any] = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
lowercase__ :str = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 101 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class UpperCAmelCase_ :
def __init__( self : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 0 ) -> None:
lowerCAmelCase , lowerCAmelCase = row, column
lowerCAmelCase = [[default_value for c in range(UpperCAmelCase__ )] for r in range(UpperCAmelCase__ )]
def __str__( self : List[str] ) -> str:
lowerCAmelCase = F'''Matrix consists of {self.row} rows and {self.column} columns\n'''
# Make string identifier
lowerCAmelCase = 0
for row_vector in self.array:
for obj in row_vector:
lowerCAmelCase = max(UpperCAmelCase__ , len(str(UpperCAmelCase__ ) ) )
lowerCAmelCase = F'''%{max_element_length}s'''
# Make string and return
def single_line(UpperCAmelCase__ : list[float] ) -> str:
nonlocal string_format_identifier
lowerCAmelCase = '['
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(UpperCAmelCase__ ) for row_vector in self.array )
return s
def __repr__( self : List[str] ) -> str:
return str(self )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : tuple[int, int] ) -> bool:
if not (isinstance(UpperCAmelCase__ , (list, tuple) ) and len(UpperCAmelCase__ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Any , UpperCAmelCase__ : tuple[int, int] ) -> Any:
assert self.validate_indices(UpperCAmelCase__ )
return self.array[loc[0]][loc[1]]
def __setitem__( self : Dict , UpperCAmelCase__ : tuple[int, int] , UpperCAmelCase__ : float ) -> None:
assert self.validate_indices(UpperCAmelCase__ )
lowerCAmelCase = value
def __add__( self : Any , UpperCAmelCase__ : Matrix ) -> Matrix:
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert self.row == another.row and self.column == another.column
# Add
lowerCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = self[r, c] + another[r, c]
return result
def __neg__( self : int ) -> Matrix:
lowerCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = -self[r, c]
return result
def __sub__( self : str , UpperCAmelCase__ : Matrix ) -> Matrix:
return self + (-another)
def __mul__( self : str , UpperCAmelCase__ : int | float | Matrix ) -> Matrix:
if isinstance(UpperCAmelCase__ , (int, float) ): # Scalar multiplication
lowerCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = self[r, c] * another
return result
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): # Matrix multiplication
assert self.column == another.row
lowerCAmelCase = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
lowerCAmelCase = F'''Unsupported type given for another ({type(UpperCAmelCase__ )})'''
raise TypeError(UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[Any] ) -> Matrix:
lowerCAmelCase = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = self[r, c]
return result
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : Matrix , UpperCAmelCase__ : Matrix ) -> Any:
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
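# Sherman-Morrison: (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u).
# Note that this method assumes `self` already holds A^(-1), which is why the
# update below needs no explicit matrix inversion.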
lowerCAmelCase = v.transpose()
lowerCAmelCase = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def a_ ( ):
# a^(-1)
lowerCAmelCase = Matrix(3 , 3 , 0 )
for i in range(3 ):
lowerCAmelCase = 1
print(f'''a^(-1) is {ainv}''' )
# u, v
lowerCAmelCase = Matrix(3 , 1 , 0 )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1, 2, -3
lowerCAmelCase = Matrix(3 , 1 , 0 )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 4, -2, 5
print(f'''u is {u}''' )
print(f'''v is {v}''' )
print(f'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(lowerCamelCase , lowerCamelCase )}''' )
def a_ ( ):
import doctest
doctest.testmod()
testa()
| 4 | 0 |
"""simple docstring"""
from timeit import timeit
SCREAMING_SNAKE_CASE : Dict = {
"""MALAYALAM""": True,
"""String""": False,
"""rotor""": True,
"""level""": True,
"""A""": True,
"""BB""": True,
"""ABC""": False,
"""amanaplanacanalpanama""": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def lowercase ( _snake_case : str ) ->bool:
"""simple docstring"""
__snake_case : Dict = 0
__snake_case : int = len(_snake_case ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def lowercase ( _snake_case : str ) ->bool:
"""simple docstring"""
__snake_case : List[Any] = len(_snake_case ) // 2
__snake_case : Optional[Any] = len(_snake_case )
# We only need to traverse half of the string: the i'th character from the
# end sits at index n - i - 1, so comparing s[i] with s[n - i - 1] checks
# every mirrored pair.
# e.g. for indices [0,1,2,3,4,5] (n == 6), index 4 is the mirror of index 1.
return all(s[i] == s[n - i - 1] for i in range(_snake_case ) )
def lowercase ( _snake_case : str ) ->bool:
"""simple docstring"""
if len(_snake_case ) <= 2:
return True
if s[0] == s[len(_snake_case ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def lowercase ( _snake_case : str ) ->bool:
"""simple docstring"""
return s == s[::-1]
def lowercase ( _snake_case : str ) ->None:
"""simple docstring"""
__snake_case : List[str] = f"""all({name}(key) is value for key, value in test_data.items())"""
__snake_case : Optional[int] = f"""from __main__ import test_data, {name}"""
__snake_case : List[str] = 500_000
__snake_case : Any = timeit(stmt=_snake_case , setup=_snake_case , number=_snake_case )
print(f"""{name:<35} finished {number:,} runs in {result:.5f} seconds""" )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F'{key:21} {value}')
print("""a man a plan a canal panama""")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("""is_palindrome_slice""")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("""is_palindrome""")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("""is_palindrome_recursive""")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("""is_palindrome_traversal""")
| 102 |
'''simple docstring'''
class UpperCAmelCase_ :
def __init__( self : List[str] , UpperCAmelCase__ : list[int] ) -> None:
lowerCAmelCase = len(UpperCAmelCase__ )
lowerCAmelCase = [0] * len_array
if len_array > 0:
lowerCAmelCase = array[0]
for i in range(1 , UpperCAmelCase__ ):
lowerCAmelCase = self.prefix_sum[i - 1] + array[i]
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
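# The subarray check below uses the identity that a contiguous subarray sums
# to target_sum iff some prefix sum minus an earlier prefix sum equals
# target_sum. Seeding the set with 0 covers subarrays that start at index 0.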
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : int ) -> bool:
lowerCAmelCase = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(UpperCAmelCase__ )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __snake_case :
def __init__( self : Optional[Any] , A_ : Dict , A_ : Optional[Any]=2 , A_ : List[str]=True , A_ : Dict=False , A_ : Union[str, Any]=1_0 , A_ : Optional[Any]=3 , A_ : str=3_2 * 8 , A_ : List[str]=3_2 * 8 , A_ : Dict=4 , A_ : List[Any]=6_4 , ):
lowerCAmelCase_ : Any = parent
lowerCAmelCase_ : Union[str, Any] = batch_size
lowerCAmelCase_ : List[Any] = is_training
lowerCAmelCase_ : int = use_auxiliary_loss
lowerCAmelCase_ : str = num_queries
lowerCAmelCase_ : Any = num_channels
lowerCAmelCase_ : Union[str, Any] = min_size
lowerCAmelCase_ : Optional[int] = max_size
lowerCAmelCase_ : List[str] = num_labels
lowerCAmelCase_ : str = hidden_dim
lowerCAmelCase_ : List[str] = hidden_dim
def UpperCAmelCase__ ( self : List[str]):
lowerCAmelCase_ : str = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
A_)
lowerCAmelCase_ : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=A_)
lowerCAmelCase_ : List[str] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=A_) > 0.5
).float()
lowerCAmelCase_ : Any = (torch.rand((self.batch_size, self.num_labels) , device=A_) > 0.5).long()
lowerCAmelCase_ : Optional[int] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCAmelCase__ ( self : Optional[Any]):
lowerCAmelCase_ : Optional[Any] = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
lowerCAmelCase_ : Dict = self.num_queries
lowerCAmelCase_ : str = self.num_labels
lowerCAmelCase_ : str = [1, 1, 1, 1]
lowerCAmelCase_ : Dict = self.num_channels
lowerCAmelCase_ : List[str] = 6_4
lowerCAmelCase_ : Union[str, Any] = 1_2_8
lowerCAmelCase_ : str = self.hidden_dim
lowerCAmelCase_ : Optional[Any] = self.hidden_dim
lowerCAmelCase_ : Any = self.hidden_dim
return config
def UpperCAmelCase__ ( self : Union[str, Any]):
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
lowerCAmelCase_ : List[str] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def UpperCAmelCase__ ( self : Optional[Any] , A_ : List[Any] , A_ : Tuple):
lowerCAmelCase_ : Any = output.encoder_hidden_states
lowerCAmelCase_ : int = output.pixel_decoder_hidden_states
lowerCAmelCase_ : Union[str, Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(A_) , len(config.backbone_config.depths))
self.parent.assertTrue(len(A_) , len(config.backbone_config.depths))
self.parent.assertTrue(len(A_) , config.decoder_layers)
def UpperCAmelCase__ ( self : Optional[int] , A_ : int , A_ : Union[str, Any] , A_ : Union[str, Any] , A_ : str=False):
with torch.no_grad():
lowerCAmelCase_ : Union[str, Any] = MaskaFormerModel(config=A_)
model.to(A_)
model.eval()
lowerCAmelCase_ : List[Any] = model(pixel_values=A_ , pixel_mask=A_)
lowerCAmelCase_ : Any = model(A_ , output_hidden_states=A_)
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(output.encoder_last_hidden_state is not None)
if output_hidden_states:
self.check_output_hidden_state(A_ , A_)
def UpperCAmelCase__ ( self : List[Any] , A_ : Union[str, Any] , A_ : List[str] , A_ : List[Any] , A_ : Tuple , A_ : Any):
lowerCAmelCase_ : Any = MaskaFormerForUniversalSegmentation(config=A_)
model.to(A_)
model.eval()
def comm_check_on_output(A_ : Optional[int]):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.encoder_last_hidden_state is not None)
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1))
with torch.no_grad():
lowerCAmelCase_ : List[Any] = model(pixel_values=A_ , pixel_mask=A_)
lowerCAmelCase_ : List[Any] = model(A_)
comm_check_on_output(A_)
lowerCAmelCase_ : Any = model(
pixel_values=A_ , pixel_mask=A_ , mask_labels=A_ , class_labels=A_)
comm_check_on_output(A_)
self.parent.assertTrue(result.loss is not None)
self.parent.assertEqual(result.loss.shape , torch.Size([1]))
@require_torch
class __snake_case ( UpperCamelCase_ ,UpperCamelCase_ ,unittest.TestCase ):
_a = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
_a = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
_a = False
_a = False
_a = False
_a = False
def UpperCAmelCase__ ( self : Any):
lowerCAmelCase_ : Tuple = MaskaFormerModelTester(self)
lowerCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=A_ , has_text_modality=A_)
def UpperCAmelCase__ ( self : Optional[int]):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Optional[Any]):
lowerCAmelCase_ , lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A_ , **A_ , output_hidden_states=A_)
def UpperCAmelCase__ ( self : List[Any]):
lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*A_)
@unittest.skip(reason='''Mask2Former does not use inputs_embeds''')
def UpperCAmelCase__ ( self : Optional[Any]):
pass
@unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''')
def UpperCAmelCase__ ( self : str):
pass
@unittest.skip(reason='''Mask2Former is not a generative model''')
def UpperCAmelCase__ ( self : int):
pass
@unittest.skip(reason='''Mask2Former does not use token embeddings''')
def UpperCAmelCase__ ( self : Tuple):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''')
def UpperCAmelCase__ ( self : List[str]):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def UpperCAmelCase__ ( self : Tuple):
pass
def UpperCAmelCase__ ( self : Dict):
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Union[str, Any] = model_class(A_)
lowerCAmelCase_ : Dict = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : Tuple = [*signature.parameters.keys()]
lowerCAmelCase_ : Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A_)
@slow
def UpperCAmelCase__ ( self : List[str]):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
lowerCAmelCase_ : List[str] = MaskaFormerModel.from_pretrained(A_)
self.assertIsNotNone(A_)
def UpperCAmelCase__ ( self : Optional[Any]):
lowerCAmelCase_ : Optional[int] = (self.model_tester.min_size,) * 2
lowerCAmelCase_ : str = {
'''pixel_values''': torch.randn((2, 3, *size) , device=A_),
'''mask_labels''': torch.randn((2, 1_0, *size) , device=A_),
'''class_labels''': torch.zeros(2 , 1_0 , device=A_).long(),
}
lowerCAmelCase_ : Union[str, Any] = self.model_tester.get_config()
lowerCAmelCase_ : Any = MaskaFormerForUniversalSegmentation(A_).to(A_)
lowerCAmelCase_ : Union[str, Any] = model(**A_)
self.assertTrue(outputs.loss is not None)
def UpperCAmelCase__ ( self : Tuple):
lowerCAmelCase_ , lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A_ , **A_ , output_hidden_states=A_)
def UpperCAmelCase__ ( self : Union[str, Any]):
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Union[str, Any] = model_class(A_).to(A_)
lowerCAmelCase_ : List[str] = model(**A_ , output_attentions=A_)
self.assertTrue(outputs.attentions is not None)
def UpperCAmelCase__ ( self : List[Any]):
if not self.model_tester.is_training:
return
lowerCAmelCase_ : Dict = self.all_model_classes[1]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase_ : Optional[Any] = model_class(A_)
model.to(A_)
model.train()
lowerCAmelCase_ : List[Any] = model(A_ , mask_labels=A_ , class_labels=A_).loss
loss.backward()
def UpperCAmelCase__ ( self : str):
lowerCAmelCase_ : str = self.all_model_classes[1]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase_ : Optional[Any] = True
lowerCAmelCase_ : Dict = True
lowerCAmelCase_ : Any = model_class(A_).to(A_)
model.train()
lowerCAmelCase_ : List[str] = model(A_ , mask_labels=A_ , class_labels=A_)
lowerCAmelCase_ : int = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCAmelCase_ : str = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
lowerCAmelCase_ : Dict = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCAmelCase_ : str = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=A_)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
A__ : int = 1E-4
def UpperCamelCase( ):
lowerCAmelCase_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class __snake_case ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : Tuple):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def UpperCAmelCase__ ( self : Optional[Any]):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
def UpperCAmelCase__ ( self : int):
lowerCAmelCase_ : Any = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(A_)
lowerCAmelCase_ : str = self.default_image_processor
lowerCAmelCase_ : Tuple = prepare_img()
lowerCAmelCase_ : Any = image_processor(A_ , return_tensors='''pt''').to(A_)
lowerCAmelCase_ : str = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
# check size
self.assertEqual(A_ , (1, 3, 3_8_4, 3_8_4))
with torch.no_grad():
lowerCAmelCase_ : Optional[Any] = model(**A_)
lowerCAmelCase_ : List[str] = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]).to(A_)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , A_ , atol=A_))
lowerCAmelCase_ : Union[str, Any] = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]).to(A_)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , A_ , atol=A_))
lowerCAmelCase_ : Optional[int] = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]).to(A_)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , A_ , atol=A_))
def UpperCAmelCase__ ( self : List[Any]):
lowerCAmelCase_ : List[str] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(A_).eval()
lowerCAmelCase_ : Optional[int] = self.default_image_processor
lowerCAmelCase_ : List[Any] = prepare_img()
lowerCAmelCase_ : Tuple = image_processor(A_ , return_tensors='''pt''').to(A_)
lowerCAmelCase_ : Union[str, Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
# check size
self.assertEqual(A_ , (1, 3, 3_8_4, 3_8_4))
with torch.no_grad():
lowerCAmelCase_ : Tuple = model(**A_)
# masks_queries_logits
lowerCAmelCase_ : int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4))
lowerCAmelCase_ : Tuple = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
lowerCAmelCase_ : Optional[Any] = torch.tensor(A_).to(A_)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , A_ , atol=A_))
# class_queries_logits
lowerCAmelCase_ : Dict = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1))
lowerCAmelCase_ : Any = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
]).to(A_)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , A_ , atol=A_))
def UpperCAmelCase__ ( self : Any):
lowerCAmelCase_ : List[str] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(A_).eval()
lowerCAmelCase_ : Optional[Any] = self.default_image_processor
lowerCAmelCase_ : Optional[Any] = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3)), np.zeros((3, 8_0_0, 1_3_3_3))] , segmentation_maps=[np.zeros((3_8_4, 3_8_4)).astype(np.floataa), np.zeros((3_8_4, 3_8_4)).astype(np.floataa)] , return_tensors='''pt''' , )
lowerCAmelCase_ : Dict = inputs['''pixel_values'''].to(A_)
lowerCAmelCase_ : Tuple = [el.to(A_) for el in inputs['''mask_labels''']]
lowerCAmelCase_ : str = [el.to(A_) for el in inputs['''class_labels''']]
with torch.no_grad():
lowerCAmelCase_ : int = model(**A_)
self.assertTrue(outputs.loss is not None)
| 103 |
'''simple docstring'''
def a_ ( lowerCamelCase : Optional[Any] ):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def a_ ( lowerCamelCase : dict[int, list[int]] ):
lowerCAmelCase = 0
lowerCAmelCase = len(lowerCamelCase ) # No of vertices in graph
lowerCAmelCase = [0] * n
lowerCAmelCase = [False] * n
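# low[v] tracks the smallest discovery label reachable from v's DFS subtree
# using at most one back edge. After returning from a child `to`, the edge
# (at, to) is a bridge iff to's subtree has no back edge to `at` or anything
# discovered earlier, i.e. low[to] is still larger than at's own label
# (the `id_ <= low[to]` test below).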
def dfs(lowerCamelCase : Tuple , lowerCamelCase : str , lowerCamelCase : Dict , lowerCamelCase : str ):
lowerCAmelCase = True
lowerCAmelCase = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(lowerCamelCase , lowerCamelCase , lowerCamelCase , id_ )
lowerCAmelCase = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowerCAmelCase = min(low[at] , low[to] )
lowerCAmelCase = []
for i in range(lowerCamelCase ):
if not visited[i]:
dfs(lowerCamelCase , -1 , lowerCamelCase , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 0 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
def _A ( A__ ):
"""simple docstring"""
__lowercase = R'''\w+[.]\d+'''
__lowercase = re.findall(A__ , A__ )
for pat in pats:
__lowercase = key.replace(A__ , '''_'''.join(pat.split('''.''' ) ) )
return key
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = pt_tuple_key[:-1] + ('''scale''',)
if (
any('''norm''' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
__lowercase = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
__lowercase = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
__lowercase = pt_tuple_key[:-1] + ('''embedding''',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
__lowercase = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
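# PyTorch conv kernels are laid out (out_channels, in_channels, kH, kW);
# Flax expects (kH, kW, in_channels, out_channels), hence the transpose.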
__lowercase = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
__lowercase = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight":
__lowercase = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
__lowercase = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
__lowercase = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def _A ( A__ , A__ , A__=42 ):
"""simple docstring"""
__lowercase = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
__lowercase = flax_model.init_weights(PRNGKey(A__ ) )
__lowercase = flatten_dict(A__ )
__lowercase = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__lowercase = rename_key(A__ )
__lowercase = tuple(renamed_pt_key.split('''.''' ) )
# Correctly rename weight parameters
__lowercase , __lowercase = rename_key_and_reshape_tensor(A__ , A__ , A__ )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# also add unexpected weight so that warning is thrown
__lowercase = jnp.asarray(A__ )
return unflatten_dict(A__ )
| 104 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case =logging.get_logger(__name__)
def a_ ( lowerCamelCase : Any ):
lowerCAmelCase = OrderedDict()
for key, value in state_dict.items():
if key.startswith('module.encoder' ):
lowerCAmelCase = key.replace('module.encoder' , 'glpn.encoder' )
if key.startswith('module.decoder' ):
lowerCAmelCase = key.replace('module.decoder' , 'decoder.stages' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase = key[key.find('patch_embed' ) + len('patch_embed' )]
lowerCAmelCase = key.replace(f'''patch_embed{idx}''' , f'''patch_embeddings.{int(lowerCamelCase )-1}''' )
if "norm" in key:
lowerCAmelCase = key.replace('norm' , 'layer_norm' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
lowerCAmelCase = key.replace(f'''layer_norm{idx}''' , f'''layer_norm.{int(lowerCamelCase )-1}''' )
if "layer_norm1" in key:
lowerCAmelCase = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
lowerCAmelCase = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase = key[key.find('block' ) + len('block' )]
lowerCAmelCase = key.replace(f'''block{idx}''' , f'''block.{int(lowerCamelCase )-1}''' )
if "attn.q" in key:
lowerCAmelCase = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
lowerCAmelCase = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
lowerCAmelCase = key.replace('attn' , 'attention.self' )
if "fc1" in key:
lowerCAmelCase = key.replace('fc1' , 'dense1' )
if "fc2" in key:
lowerCAmelCase = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
lowerCAmelCase = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
lowerCAmelCase = key.replace('linear_fuse.conv' , 'linear_fuse' )
lowerCAmelCase = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase = key[key.find('linear_c' ) + len('linear_c' )]
lowerCAmelCase = key.replace(f'''linear_c{idx}''' , f'''linear_c.{int(lowerCamelCase )-1}''' )
if "bot_conv" in key:
lowerCAmelCase = key.replace('bot_conv' , '0.convolution' )
if "skip_conv1" in key:
lowerCAmelCase = key.replace('skip_conv1' , '1.convolution' )
if "skip_conv2" in key:
lowerCAmelCase = key.replace('skip_conv2' , '2.convolution' )
if "fusion1" in key:
lowerCAmelCase = key.replace('fusion1' , '1.fusion' )
if "fusion2" in key:
lowerCAmelCase = key.replace('fusion2' , '2.fusion' )
if "fusion3" in key:
lowerCAmelCase = key.replace('fusion3' , '3.fusion' )
if "fusion" in key and "conv" in key:
lowerCAmelCase = key.replace('conv' , 'convolutional_layer' )
if key.startswith('module.last_layer_depth' ):
lowerCAmelCase = key.replace('module.last_layer_depth' , 'head.head' )
lowerCAmelCase = value
return new_state_dict
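# End-to-end example of the remapping above (illustrative key only):
# "module.encoder.patch_embed1.proj.weight" -> "glpn.encoder.patch_embeddings.0.proj.weight"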
def a_ ( lowerCamelCase : List[str] , lowerCamelCase : str ):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
lowerCAmelCase = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase = kv_bias[config.hidden_sizes[i] :]
def a_ ( ):
lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return image
@torch.no_grad()
def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any]=False , lowerCamelCase : List[str]=None ):
lowerCAmelCase = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
lowerCAmelCase = GLPNImageProcessor()
# prepare image
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=lowerCamelCase , return_tensors='pt' ).pixel_values
logger.info('Converting model...' )
# load original state dict
lowerCAmelCase = torch.load(lowerCamelCase , map_location=torch.device('cpu' ) )
# rename keys
lowerCAmelCase = rename_keys(lowerCamelCase )
# key and value matrices need special treatment
read_in_k_v(lowerCamelCase , lowerCamelCase )
# create HuggingFace model and load state dict
lowerCAmelCase = GLPNForDepthEstimation(lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
# forward pass
lowerCAmelCase = model(lowerCamelCase )
lowerCAmelCase = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
lowerCAmelCase = torch.tensor(
[[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
elif "kitti" in model_name:
lowerCAmelCase = torch.tensor(
[[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
else:
raise ValueError(f'''Unknown model name: {model_name}''' )
lowerCAmelCase = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , lowerCamelCase , atol=1e-4 )
print('Looks ok!' )
# finally, push to hub if required
if push_to_hub:
logger.info('Pushing model and image processor to the hub...' )
model.push_to_hub(
repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCamelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase , )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
__snake_case =parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 4 | 0 |
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __UpperCamelCase ( a__ ):
lowerCamelCase : List[str] =DistilBertTokenizer
lowerCamelCase : Optional[Any] =DistilBertTokenizerFast
lowerCamelCase : List[Any] =True
@slow
def __a ( self ) -> Any:
a : Dict = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
a : List[str] = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase__ )
a : Optional[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase__ )
a : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
a : Tuple = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 105 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self : str ) -> List[str]:
lowerCAmelCase = XLMRobertaModel.from_pretrained('xlm-roberta-base' )
lowerCAmelCase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
lowerCAmelCase = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim
lowerCAmelCase = torch.tensor(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
lowerCAmelCase = model(UpperCAmelCase__ )['last_hidden_state'].detach()
self.assertEqual(output.shape , UpperCAmelCase__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCAmelCase__ , atol=1E-3 ) )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
lowerCAmelCase = XLMRobertaModel.from_pretrained('xlm-roberta-large' )
lowerCAmelCase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
lowerCAmelCase = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim
lowerCAmelCase = torch.tensor(
[[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
lowerCAmelCase = model(UpperCAmelCase__ )['last_hidden_state'].detach()
self.assertEqual(output.shape , UpperCAmelCase__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCAmelCase__ , atol=1E-3 ) )
| 4 | 0 |
"""simple docstring"""
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : str ,lowercase_ : List[str] ,lowercase_ : Optional[int]=1_4 ,lowercase_ : Any=7 ,lowercase_ : List[Any]=True ,lowercase_ : Union[str, Any]=True ,lowercase_ : Any=True ,lowercase_ : Optional[int]=True ,lowercase_ : Union[str, Any]=True ,lowercase_ : Union[str, Any]=9_9 ,lowercase_ : Any=3_2 ,lowercase_ : str=5 ,lowercase_ : int=4 ,lowercase_ : str=3_7 ,lowercase_ : Optional[Any]="gelu" ,lowercase_ : Any=0.1 ,lowercase_ : Any=0.1 ,lowercase_ : Tuple=5_1_2 ,lowercase_ : str=1_6 ,lowercase_ : str=2 ,lowercase_ : List[Any]=0.02 ,lowercase_ : int=3 ,lowercase_ : List[str]=4 ,lowercase_ : int=None ,):
lowerCAmelCase__ : Union[str, Any] = parent
lowerCAmelCase__ : Union[str, Any] = batch_size
lowerCAmelCase__ : Optional[Any] = seq_length
lowerCAmelCase__ : Any = is_training
lowerCAmelCase__ : int = use_token_type_ids
lowerCAmelCase__ : Optional[int] = use_input_mask
lowerCAmelCase__ : List[str] = use_labels
lowerCAmelCase__ : Optional[Any] = use_mc_token_ids
lowerCAmelCase__ : List[str] = vocab_size
lowerCAmelCase__ : int = hidden_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : Tuple = num_attention_heads
lowerCAmelCase__ : Tuple = intermediate_size
lowerCAmelCase__ : List[str] = hidden_act
lowerCAmelCase__ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase__ : int = attention_probs_dropout_prob
lowerCAmelCase__ : List[Any] = max_position_embeddings
lowerCAmelCase__ : Optional[int] = type_vocab_size
lowerCAmelCase__ : int = type_sequence_label_size
lowerCAmelCase__ : List[str] = initializer_range
lowerCAmelCase__ : Optional[Any] = num_labels
lowerCAmelCase__ : Optional[int] = num_choices
lowerCAmelCase__ : Dict = scope
lowerCAmelCase__ : List[Any] = self.vocab_size - 1
def __lowerCAmelCase ( self : Optional[Any] ):
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowerCAmelCase__ : str = None
if self.use_input_mask:
lowerCAmelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : int = None
if self.use_token_type_ids:
lowerCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
lowerCAmelCase__ : Optional[int] = None
if self.use_mc_token_ids:
lowerCAmelCase__ : int = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length )
lowerCAmelCase__ : Tuple = None
lowerCAmelCase__ : Optional[Any] = None
lowerCAmelCase__ : int = None
if self.use_labels:
lowerCAmelCase__ : List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size] ,self.num_choices )
lowerCAmelCase__ : Optional[int] = self.get_config()
lowerCAmelCase__ : Dict = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __lowerCAmelCase ( self : int ):
return CTRLConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
def __lowerCAmelCase ( self : Any ,lowercase_ : Optional[int] ,lowercase_ : Dict ,lowercase_ : List[Any] ,lowercase_ : Any ,lowercase_ : str ,*lowercase_ : str ):
lowerCAmelCase__ : Tuple = CTRLModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
model(lowercase_ ,token_type_ids=lowercase_ ,head_mask=lowercase_ )
model(lowercase_ ,token_type_ids=lowercase_ )
lowerCAmelCase__ : Tuple = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) ,config.n_layer )
def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : Dict ,lowercase_ : Tuple ,lowercase_ : Dict ,lowercase_ : List[str] ,lowercase_ : Any ,*lowercase_ : Optional[Any] ):
lowerCAmelCase__ : List[str] = CTRLLMHeadModel(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase__ : Optional[Any] = model(lowercase_ ,token_type_ids=lowercase_ ,labels=lowercase_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : Optional[Any] ):
lowerCAmelCase__ : int = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs # (config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels)
lowerCAmelCase__ : Tuple = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
return config, inputs_dict
def __lowerCAmelCase ( self : Any ,lowercase_ : str ,lowercase_ : str ,lowercase_ : str ,lowercase_ : Optional[int] ,*lowercase_ : Optional[Any] ):
lowerCAmelCase__ : List[str] = self.num_labels
lowerCAmelCase__ : Optional[Any] = CTRLForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase__ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCAmelCase__ : Dict = model(lowercase_ ,token_type_ids=lowercase_ ,labels=lowercase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_parallelism(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859, 0, 1611, 8, 5, 150, 26449, 2, 19, 348, 469, 3, 2595, 48, 20740, 246533, 246533, 19, 30, 5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
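

# Hedged usage sketch (not part of the original test file): the same greedy
# generation the slow test above checks, as a standalone snippet. It downloads
# the full "ctrl" checkpoint, so it is illustrative rather than fast.
if __name__ == "__main__":
    from transformers import CTRLLMHeadModel, CTRLTokenizer

    tokenizer = CTRLTokenizer.from_pretrained("ctrl")
    lm_model = CTRLLMHeadModel.from_pretrained("ctrl")
    prompt_ids = tokenizer("Legal the president is", return_tensors="pt").input_ids
    generated = lm_model.generate(prompt_ids, do_sample=False)  # greedy decoding
    print(tokenizer.decode(generated[0]))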
| 106 |
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
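

# Quick sanity-check sketch (not in the original script): the ratio_char_token
# field computed by tokenize() is characters per token. A whitespace split
# stands in for the real tokenizer here, purely for illustration.
def _toy_ratio(content: str) -> float:
    toy_input_ids = content.split()  # stand-in for tokenizer(content)["input_ids"]
    return len(content) / len(toy_input_ids)


assert _toy_ratio("def f ( x ) :") > 1.0  # more characters than tokens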
| 4 | 0 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    """Count the parameters of `model` that require gradients."""
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by the given validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
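

# Hedged wiring sketch (not in the original file): how these callbacks are
# typically attached to a pytorch_lightning Trainer; "rouge2" and the output
# directory are illustrative values.
#
#   trainer = pl.Trainer(
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback("output/", "rouge2"),
#           get_early_stopping_callback("rouge2", patience=3),
#       ],
#   )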
| 107 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
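

# Hedged usage sketch (not in the original module): the generation_* fields are
# read during evaluation when predict_with_generate is set; the output path is
# illustrative.
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="out",
#       predict_with_generate=True,
#       generation_max_length=128,
#       generation_num_beams=4,
#   )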
| 4 | 0 |
"""simple docstring"""
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    """Return the 'Cited by' text scraped from a Google Scholar lookup page."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
'''title''': (
'''Precisely geometry controlled microsupercapacitors for ultrahigh areal '''
'''capacitance, volumetric capacitance, and energy density'''
),
'''journal''': '''Chem. Mater.''',
'''volume''': 30,
'''pages''': '''3979-3990''',
'''year''': 2_018,
'''hl''': '''en''',
}
print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params))
| 108 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")

MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    # every branch of the original if/elif chain over weight_type assigned
    # `value` to the attribute of that name, so it collapses into one getattr
    if weight_type is not None:
        getattr(hf_pointer, weight_type).data = value
    else:
        hf_pointer.data = value
logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False


def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ("encodec_24khz", "encodec_32khz"):  # the original `== x or "y"` check was always true
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)

                # pick the most specific weight-type suffix present in the name;
                # order matters, e.g. "weight_ih_l0" must win over plain "weight"
                weight_type = None
                for candidate in (
                    "weight_g",
                    "weight_v",
                    "weight_ih_l0",
                    "weight_hh_l0",
                    "bias_ih_l0",
                    "bias_hh_l0",
                    "weight_ih_l1",
                    "weight_hh_l1",
                    "bias_ih_l1",
                    "bias_hh_l1",
                    "bias",
                    "weight",
                    "running_mean",
                    "running_var",
                    "num_batches_tracked",
                ):
                    if candidate in name:
                        weight_type = candidate
                        break

                set_recursively(hf_model, mapped_key, value, name, weight_type)

        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
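

# Illustrative invocation (not part of the original script; the file name is a
# placeholder, the flags are the ones defined above):
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec-24khz-hf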
| 4 | 0 |
"""simple docstring"""
import re
def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator("+918827897895"))
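
# Illustrative behaviour (not in the original file): the pattern accepts an
# optional "+91"/"91" prefix and an optional leading zero before a 10-digit
# number starting with 7, 8 or 9:
#   indian_phone_validator("+91-9876543210")  -> True
#   indian_phone_validator("123456789")       -> False (no valid 10-digit body)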
| 109 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 4 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
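

# Hedged usage sketch (not part of this module): running the pipeline with a
# pretrained checkpoint; "google/ddpm-cifar10-32" is an illustrative model id.
#
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#   image.save("sample.png")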
| 110 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/blenderbot_small-90M""": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 4 | 0 |
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """Sum of the arithmetic progression starting at `first_term` with step `common_diff`."""
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
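

# Worked check (not in the original file): the closed form
# S_n = n/2 * (2*a_1 + (n-1)*d) with a_1 = 1, d = 1, n = 10 gives 5 * 11 = 55,
# matching the plain sum of 1..10.
assert sum_of_series(1, 1, 10) == sum(range(1, 11)) == 55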
| 340 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
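

# Hedged usage sketch (not in the original module): every argument falls back
# to the defaults above, so a bare instantiation works for quick experiments.
#
#   config = Speech2Text2Config()
#   assert config.d_model == 256 and config.decoder_layers == 6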
| 4 | 0 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch iteration step the given number of times."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each line segment with the four segments of the Koch bump."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2-D vector counter-clockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """Plot the vectors as one connected line."""
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
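

# Worked example (not in the original file): rotating the unit x-vector by
# 90 degrees yields the unit y-vector, up to floating point error.
assert numpy.allclose(rotate(numpy.array([1, 0]), 90), numpy.array([0, 1]))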
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : int = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 88 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
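
# Illustrative behaviour (not in the original file):
#   patience_sort([5, 3, 1, 4, 2]) -> [1, 2, 3, 4, 5]
#   patience_sort([])              -> []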
| 4 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 201 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__snake_case ="""\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__snake_case ="""\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__snake_case ="""
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 4 | 0 |
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
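

# Worked example (not in the original file): one generation turns the vertical
# blinker bar into a horizontal one.
assert new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]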
if __name__ == "__main__":
__A = generate_images(GLIDER, 1_6)
images[0].save("out.gif", save_all=True, append_images=images[1:])
| 177 |
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__snake_case ="""\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
__snake_case ="""\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
__snake_case ="""
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """Computes F1 score and Exact Match for MultiRC predictions."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 4 | 0 |
"""simple docstring"""
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
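
# Illustrative usage (not in the original file):
#   PrefixSum([1, 2, 3]).get_sum(0, 2)   -> 6
#   PrefixSum([1, 2, 3]).contains_sum(5) -> True  (the subarray 2 + 3)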
| 217 |
'''simple docstring'''
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
| 4 | 0 |
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class ExamplesTests(TempDirTestCase):
    clean_on_exit = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| 332 |
'''simple docstring'''
import os
__snake_case ={"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1_000}
def a_ ( lowerCamelCase : str ):
lowerCAmelCase = 0
lowerCAmelCase = 0
while index < len(lowerCamelCase ) - 1:
lowerCAmelCase = SYMBOLS[numerals[index]]
lowerCAmelCase = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def a_ ( lowerCamelCase : int ):
lowerCAmelCase = ''
lowerCAmelCase = num // 1000
numerals += m_count * "M"
num %= 1000
lowerCAmelCase = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
lowerCAmelCase = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def a_ ( lowerCamelCase : str = "/p089_roman.txt" ):
lowerCAmelCase = 0
with open(os.path.dirname(lowerCamelCase ) + roman_numerals_filename ) as filea:
lowerCAmelCase = filea.readlines()
for line in lines:
lowerCAmelCase = line.strip()
lowerCAmelCase = parse_roman_numerals(lowerCamelCase )
lowerCAmelCase = generate_roman_numerals(lowerCamelCase )
savings += len(lowerCamelCase ) - len(lowerCamelCase )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
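    # Illustrative round-trip check of the two helpers above (values chosen
    # here for demonstration): the inefficient form of 49 parses correctly,
    # and regeneration yields the minimal form, saving five characters.
    assert parse_roman_numerals("XXXXVIIII") == 49
    assert generate_roman_numerals(49) == "XLIX"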
| 4 | 0 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )

        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
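
# Illustrative flow mirrored by the tests above (assumes a valid write token;
# the repo name and token are placeholders):
#
#     processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
#     processor.push_to_hub("my-image-processor", use_auth_token="hf_...")
#     reloaded = ViTImageProcessor.from_pretrained("<user>/my-image-processor")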
| 260 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()

if is_torch_available():
    import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
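
# Illustrative standalone use of the extractor tested above (one second of
# silence as an assumed input; the output is always padded to 1024 frames):
#
#     extractor = ASTFeatureExtractor()
#     features = extractor([0.0] * 16000, sampling_rate=16000, return_tensors="np")
#     print(features.input_values.shape)  # (1, 1024, 128)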
| 4 | 0 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Draw `length` characters uniformly from letters, digits and punctuation."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters
    return upper and lower and num and spec_char


def main():
    max_length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(max_length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, max_length),
    )
    print("[If you are thinking of using this password, you had better save it.]")


if __name__ == "__main__":
    main()
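    # Illustrative non-interactive check of the helpers above (example
    # lengths and inclusion string chosen for demonstration):
    pw = password_generator(12)
    print(pw, "strong" if is_strong_password(pw) else "weak")
    alt = alternative_password_generator("@A1", 12)
    print(alt, "strong" if is_strong_password(alt) else "weak")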
| 73 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 4 | 0 |
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter, processing one float sample at a time."""

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        """Set the filter coefficients; both lists must have `order + 1` elements."""
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_msg)

        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        """Calculate y[n] from x[n] using the direct-form I difference equation."""
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        # Shift the histories and record the newest input/output.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
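

# Illustrative usage of the class above; the coefficient values are made-up
# placeholders for demonstration, not a designed filter.
if __name__ == "__main__":
    iir = IIRFilter(2)
    iir.set_coefficients([1.0, -1.1, 0.3], [0.05, 0.1, 0.05])  # a_coeffs, b_coeffs
    impulse = [1.0, 0.0, 0.0, 0.0, 0.0]
    print([round(iir.process(x), 4) for x in impulse])  # impulse response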
| 252 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
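
# Illustrative usage of the fast tokenizer defined above (requires network
# access to download the checkpoint referenced in the maps at the top):
#
#     tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#     encoded = tokenizer("Hello, how are you?")
#     print(tokenizer.decode(encoded.input_ids))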
| 4 | 0 |
"""simple docstring"""
import os
__A = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
def UpperCamelCase__ ( lowercase__ : str ):
snake_case : int = 0
snake_case : str = 0
while index < len(lowercase__ ) - 1:
snake_case : Dict = SYMBOLS[numerals[index]]
snake_case : Tuple = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def UpperCamelCase__ ( lowercase__ : int ):
snake_case : Union[str, Any] = ""
snake_case : Optional[int] = num // 1000
numerals += m_count * "M"
num %= 1000
snake_case : Union[str, Any] = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
snake_case : List[str] = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def UpperCamelCase__ ( lowercase__ : str = "/p089_roman.txt" ):
snake_case : int = 0
with open(os.path.dirname(lowercase__ ) + roman_numerals_filename ) as filea:
snake_case : str = filea.readlines()
for line in lines:
snake_case : List[Any] = line.strip()
snake_case : Any = parse_roman_numerals(lowercase__ )
snake_case : Tuple = generate_roman_numerals(lowercase__ )
savings += len(lowercase__ ) - len(lowercase__ )
return savings
if __name__ == "__main__":
print(f'{solution() = }')
| 148 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes

    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []

    completed = 0
    total_time = 0

    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.

    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
| 4 | 0 |