"""simple docstring"""
from __future__ import annotations
class __A :
"""simple docstring"""
def __init__( self , __A ) -> None:
a =data
a =None
a =None
def _A ( lowercase ): # In Order traversal of the tree
"""simple docstring"""
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def _A ( lowercase ):
"""simple docstring"""
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def _A ( lowercase ):
"""simple docstring"""
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def _A ( ): # Main function for testing.
"""simple docstring"""
a =Node(1 )
a =Node(2 )
a =Node(3 )
a =Node(4 )
a =Node(5 )
a =Node(6 )
a =Node(7 )
a =Node(8 )
a =Node(9 )
print(is_full_binary_tree(lowercase ) )
print(depth_of_tree(lowercase ) )
print('''Tree is: ''' )
display(lowercase )
if __name__ == "__main__":
main() | 81 |
def actual_power(a: int, b: int):
    """Divide-and-conquer exponentiation.

    `1 / actual_power(a, b)` in `power` works for negative `b` because
    `int(b / 2)` truncates toward zero, so |b| still shrinks to 0.
    """
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
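# A hedged aside (not in the original file): the recursion above evaluates the
# half-power twice per call, so its running time is O(b) even though the depth
# is O(log b). A minimal sketch of the cached variant, assuming b >= 0:
#
#     def fast_power(a: int, b: int) -> int:
#         if b == 0:
#             return 1
#         half = fast_power(a, b // 2)
#         return half * half if b % 2 == 0 else a * half * half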
import argparse
from collections import defaultdict
import yaml
A__ = """docs/source/en/_toctree.yml"""
def _UpperCAmelCase ( snake_case ):
"""simple docstring"""
_lowerCAmelCase = defaultdict(snake_case )
_lowerCAmelCase = []
_lowerCAmelCase = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} )
else:
new_doc_list.append(snake_case )
_lowerCAmelCase = new_doc_list
_lowerCAmelCase = [key for key, value in counts.items() if value > 1]
_lowerCAmelCase = []
for duplicate_key in duplicates:
_lowerCAmelCase = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} )
if len(snake_case ) > 1:
raise ValueError(
F'{duplicate_key} is present several times in the documentation table of content at '
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] )
_lowerCAmelCase = sorted(snake_case , key=lambda snake_case : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(snake_case ) > 1:
raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" )
overview_doc.extend(snake_case )
# Sort
return overview_doc
def _UpperCAmelCase ( snake_case=False ):
"""simple docstring"""
with open(snake_case , encoding="""utf-8""" ) as f:
_lowerCAmelCase = yaml.safe_load(f.read() )
# Get to the API doc
_lowerCAmelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowerCAmelCase = content[api_idx]["""sections"""]
# Then to the model doc
_lowerCAmelCase = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
_lowerCAmelCase = api_doc[scheduler_idx]["""sections"""]
_lowerCAmelCase = clean_doc_toc(snake_case )
_lowerCAmelCase = False
if new_scheduler_doc != scheduler_doc:
_lowerCAmelCase = True
if overwrite:
_lowerCAmelCase = new_scheduler_doc
if diff:
if overwrite:
_lowerCAmelCase = api_doc
with open(snake_case , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(snake_case , allow_unicode=snake_case ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
def _UpperCAmelCase ( snake_case=False ):
"""simple docstring"""
with open(snake_case , encoding="""utf-8""" ) as f:
_lowerCAmelCase = yaml.safe_load(f.read() )
# Get to the API doc
_lowerCAmelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowerCAmelCase = content[api_idx]["""sections"""]
# Then to the model doc
_lowerCAmelCase = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
_lowerCAmelCase = False
_lowerCAmelCase = api_doc[pipeline_idx]["""sections"""]
_lowerCAmelCase = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
_lowerCAmelCase = pipeline_doc["""section"""]
_lowerCAmelCase = clean_doc_toc(snake_case )
if overwrite:
_lowerCAmelCase = new_sub_pipeline_doc
new_pipeline_docs.append(snake_case )
# sort overall pipeline doc
_lowerCAmelCase = clean_doc_toc(snake_case )
if new_pipeline_docs != pipeline_docs:
_lowerCAmelCase = True
if overwrite:
_lowerCAmelCase = new_pipeline_docs
if diff:
if overwrite:
_lowerCAmelCase = api_doc
with open(snake_case , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(snake_case , allow_unicode=snake_case ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
A__ = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
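# Hypothetical usage sketch (not in the original script) showing what
# `clean_doc_toc` is meant to do: duplicates collapse into one entry, titles
# sort case-insensitively, and the "Overview" entry always comes first.
#
# >>> clean_doc_toc([
# ...     {"local": "ddim", "title": "DDIM"},
# ...     {"local": "overview", "title": "Overview"},
# ...     {"local": "ddim", "title": "DDIM"},
# ...     {"local": "adapter", "title": "Adapter"},
# ... ])
# [{'local': 'overview', 'title': 'Overview'}, {'local': 'adapter', 'title': 'Adapter'}, {'local': 'ddim', 'title': 'DDIM'}]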
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    """Two parallel Transformer2DModel blocks whose residual outputs are mixed during inference."""

    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0] + condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
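# Hypothetical shape walk-through (not in the original file): with the default
# condition_lengths [77, 257], `encoder_hidden_states` carries 77 + 257 = 334
# condition tokens. Tokens [0:77] are routed to transformers[1] and tokens
# [77:334] to transformers[0] (per transformer_index_for_condition = [1, 0]),
# and the two residuals are blended with mix_ratio = 0.5 before being added
# back onto the input states.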
"""Small training/visualization helpers."""
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_module(module):
    # Name reconstructed from context: stops gradient flow through `module`.
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    # Name reconstructed from context: renders `image` without axis ticks.
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
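# Hypothetical usage sketch (not in the original module), assuming the
# reconstructed helper names above:
#
#     device = get_device()
#     model = torch.nn.Linear(4, 2).to(device)
#     freeze_module(model)        # all parameters now have requires_grad=False
#     print(get_timestamp())      # e.g. "14:03:59"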
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """Graph vertex with an id, a key (current best edge weight) and a parent pointer `pi`."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex id: edge weight}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self) -> str:
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's MST algorithm with a linear scan for the minimum-key vertex."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's MST algorithm using a binary heap; yields (child, parent) pairs."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """Stub: the original doctests were stripped during extraction."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
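# Hypothetical usage sketch (not in the original file): build the triangle
# 1-2 (weight 1), 2-3 (weight 2), 1-3 (weight 4) and recover its MST as
# (child, parent) pairs.
#
#     graph = [Vertex(n) for n in range(3)]
#     connect(graph, 1, 2, 1)
#     connect(graph, 2, 3, 2)
#     connect(graph, 1, 3, 4)
#     print(prim(graph, graph[0]))             # -> [(2, 1), (3, 2)]
#     print(list(prim_heap(graph, graph[0])))  # same tree via the heap variant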
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Incremental sieve of Eratosthenes: lazily yields 2, 3, 5, 7, ..."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: slide its recorded factor to the next multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` really is prime: mark its square as the first composite it owns.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Return the least odd index n such that the remainder 2 * p_n * n exceeds `limit`."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime, as the remainder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
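# Hypothetical quick check (not in the original solution): the incremental
# sieve needs no precomputed bound, so the first few primes can be pulled
# lazily with itertools.islice:
#
#     from itertools import islice
#     print(list(islice(sieve(), 8)))   # -> [2, 3, 5, 7, 11, 13, 17, 19]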
# flake8: noqa
# Lint as: python3
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
"""`diffusers-cli env`: collects environment info for bug reports."""
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}


class WavLMConfig(PretrainedConfig):
    """Configuration class to store the configuration of a WavLM model."""

    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1,
        feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5,
        feat_extract_norm="group", feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False,
        num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, num_buckets=320, max_bucket_distance=800,
        do_stable_layer_norm=False, apply_spec_augment=True,
        mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10,
        num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1,
        num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1,
        ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2,
        add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
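# Hypothetical quick check (not in the original file): with the default
# strides (5, 2, 2, 2, 2, 2, 2), the feature encoder downsamples raw audio by
# 5 * 2**6 = 320 input samples per output frame:
#
#     config = WavLMConfig()
#     assert config.inputs_to_logits_ratio == 320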
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
return int((input_a, input_a).count(0 ) != 0 )
def __lowerCAmelCase ():
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1)) | 86 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
            prompt, init_image, mask_image
        )

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image classification mapping
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
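# Hypothetical usage sketch (not in the original module): an auto class
# resolves a checkpoint's config type to the matching Flax architecture.
#
#     from transformers import FlaxAutoModelForMaskedLM
#
#     model = FlaxAutoModelForMaskedLM.from_pretrained("bert-base-uncased")
#     # -> a FlaxBertForMaskedLM, selected via FLAX_MODEL_FOR_MASKED_LM_MAPPING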
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence''' ),
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
'''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf''',
] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True,
        size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True,
        image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Expected (height, width) after resizing (and, when batched, padding to the largest image)."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """Variance-preserving (VP) score-based SDE scheduler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
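# Hypothetical usage sketch (not in the original file): one reverse-diffusion
# loop on a dummy batch, with a stand-in for a score-predicting model.
#
#     scheduler = ScoreSdeVpScheduler()
#     scheduler.set_timesteps(10)
#     x = torch.randn(1, 3, 8, 8)
#     for t in scheduler.timesteps:
#         score = -x  # stand-in for model(x, t)
#         x, x_mean = scheduler.step_pred(score, x, t)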
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")
    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case=False ) -> Optional[Any]:
"""simple docstring"""
if base_model:
_lowercase =''''''
else:
_lowercase ='''mobilevit.'''
for key in orig_state_dict.copy().keys():
_lowercase =orig_state_dict.pop(__snake_case )
if key[:8] == "encoder.":
_lowercase =key[8:]
if "qkv" in key:
_lowercase =key.split('''.''' )
_lowercase =int(key_split[0][6:] ) - 1
_lowercase =int(key_split[3] )
_lowercase =model.get_submodule(F"{model_prefix}encoder.layer.{layer_num}" )
_lowercase =layer.transformer.layer[transformer_num].attention.attention.all_head_size
_lowercase =(
F"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
)
if "weight" in key:
_lowercase =val[:dim, :]
_lowercase =val[dim : dim * 2, :]
_lowercase =val[-dim:, :]
else:
_lowercase =val[:dim]
_lowercase =val[dim : dim * 2]
_lowercase =val[-dim:]
else:
_lowercase =val
return orig_state_dict
def UpperCAmelCase_ ( ) -> Union[str, Any]:
"""simple docstring"""
_lowercase ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
_lowercase =Image.open(requests.get(__snake_case , stream=__snake_case ).raw )
return im
@torch.no_grad()
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case=False ) -> int:
"""simple docstring"""
_lowercase =get_mobilevit_config(__snake_case )
# load original state_dict
_lowercase =torch.load(__snake_case , map_location='''cpu''' )
# load 🤗 model
if mobilevit_name.startswith('''deeplabv3_''' ):
_lowercase =MobileViTForSemanticSegmentation(__snake_case ).eval()
else:
_lowercase =MobileViTForImageClassification(__snake_case ).eval()
_lowercase =convert_state_dict(__snake_case , __snake_case )
model.load_state_dict(__snake_case )
# Check outputs on an image, prepared by MobileViTImageProcessor
_lowercase =MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
_lowercase =image_processor(images=prepare_img() , return_tensors='''pt''' )
_lowercase =model(**__snake_case )
_lowercase =outputs.logits
if mobilevit_name.startswith('''deeplabv3_''' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
_lowercase =torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
_lowercase =torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
_lowercase =torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
] )
else:
raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
assert torch.allclose(logits[0, :3, :3, :3] , __snake_case , atol=1e-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
_lowercase =torch.tensor([-0.98_66, 0.23_92, -1.12_41] )
elif mobilevit_name == "mobilevit_xs":
_lowercase =torch.tensor([-2.47_61, -0.93_99, -1.95_87] )
elif mobilevit_name == "mobilevit_xxs":
_lowercase =torch.tensor([-1.93_64, -1.23_27, -0.46_53] )
else:
raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
assert torch.allclose(logits[0, :3] , __snake_case , atol=1e-4 )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(F"Saving model {mobilevit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__snake_case )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__snake_case )
if push_to_hub:
_lowercase ={
'''mobilevit_s''': '''mobilevit-small''',
'''mobilevit_xs''': '''mobilevit-x-small''',
'''mobilevit_xxs''': '''mobilevit-xx-small''',
'''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
'''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
'''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
}
print('''Pushing to the hub...''' )
_lowercase =model_mapping[mobilevit_name]
image_processor.push_to_hub(__snake_case , organization='''apple''' )
model.push_to_hub(__snake_case , organization='''apple''' )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
UpperCAmelCase__ = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
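# Example invocation (hypothetical script name and local paths; the flags are
# the ones defined by the parser above):
#   python convert_mobilevit_original_to_pytorch.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small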
| 5 | 0 |
from math import sqrt
def lowerCamelCase_ ( UpperCamelCase__ : int ) -> bool:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (
number >= 0
), "'number' must been an int and positive"
__lowerCamelCase = True
# 0 and 1 are none primes.
if number <= 1:
__lowerCamelCase = False
for divisor in range(2 , int(round(sqrt(UpperCamelCase__ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
__lowerCamelCase = False
break
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'status' must been from type bool"
return status
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> Tuple:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__lowerCamelCase = list(range(2 , n + 1 ) )
    __lowerCamelCase = [] # this list will be returned.
# actual sieve of erathostenes
for i in range(len(UpperCamelCase__ ) ):
for j in range(i + 1 , len(UpperCamelCase__ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__lowerCamelCase = 0
# filters actual prime numbers.
__lowerCamelCase = [x for x in begin_list if x != 0]
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'ans' must been from type list"
return ans
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> List[str]:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n > 2), "'N' must been an int and > 2"
__lowerCamelCase = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(UpperCamelCase__ ):
ans.append(UpperCamelCase__ )
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'ans' must been from type list"
return ans
def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] ) -> int:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and number >= 0, "'number' must been an int and >= 0"
    __lowerCamelCase = [] # this list will be returned by the function.
# potential prime number factors.
__lowerCamelCase = 2
__lowerCamelCase = number
if number == 0 or number == 1:
ans.append(UpperCamelCase__ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(UpperCamelCase__ ):
while quotient != 1:
if is_prime(UpperCamelCase__ ) and (quotient % factor == 0):
ans.append(UpperCamelCase__ )
quotient /= factor
else:
factor += 1
else:
ans.append(UpperCamelCase__ )
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'ans' must been from type list"
return ans
def lowerCamelCase_ ( UpperCamelCase__ : Tuple ) -> Optional[int]:
"""simple docstring"""
    assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (
        number >= 0
    ), "'number' must been an int and >= 0"
__lowerCamelCase = 0
# prime factorization of 'number'
__lowerCamelCase = prime_factorization(UpperCamelCase__ )
__lowerCamelCase = max(UpperCamelCase__ )
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'ans' must been from type int"
return ans
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
    assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (
        number >= 0
    ), "'number' must been an int and >= 0"
__lowerCamelCase = 0
# prime factorization of 'number'
__lowerCamelCase = prime_factorization(UpperCamelCase__ )
__lowerCamelCase = min(UpperCamelCase__ )
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'ans' must been from type int"
return ans
def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , UpperCamelCase__ ), "compare must been from type bool"
return number % 2 == 0
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> str:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , UpperCamelCase__ ), "compare must been from type bool"
return number % 2 != 0
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
assert (
isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (number > 2) and is_even(UpperCamelCase__ )
), "'number' must been an int, even and > 2"
    __lowerCamelCase = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
__lowerCamelCase = get_prime_numbers(UpperCamelCase__ )
__lowerCamelCase = len(UpperCamelCase__ )
# run variable for while-loops.
__lowerCamelCase = 0
__lowerCamelCase = None
# exit variable. for break up the loops
__lowerCamelCase = True
while i < len_pn and loop:
__lowerCamelCase = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__lowerCamelCase = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(UpperCamelCase__ , UpperCamelCase__ )
and (len(UpperCamelCase__ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
assert (
isinstance(UpperCamelCase__ , UpperCamelCase__ )
and isinstance(UpperCamelCase__ , UpperCamelCase__ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
__lowerCamelCase = 0
while numbera != 0:
__lowerCamelCase = numbera % numbera
__lowerCamelCase = numbera
__lowerCamelCase = rest
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : str ) -> int:
"""simple docstring"""
assert (
isinstance(UpperCamelCase__ , UpperCamelCase__ )
and isinstance(UpperCamelCase__ , UpperCamelCase__ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__lowerCamelCase = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__lowerCamelCase = prime_factorization(UpperCamelCase__ )
__lowerCamelCase = prime_factorization(UpperCamelCase__ )
elif numbera == 1 or numbera == 1:
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = max(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = 0
__lowerCamelCase = 0
    __lowerCamelCase = [] # captured numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__lowerCamelCase = prime_fac_a.count(UpperCamelCase__ )
__lowerCamelCase = prime_fac_a.count(UpperCamelCase__ )
for _ in range(max(UpperCamelCase__ , UpperCamelCase__ ) ):
ans *= n
else:
__lowerCamelCase = prime_fac_a.count(UpperCamelCase__ )
for _ in range(UpperCamelCase__ ):
ans *= n
done.append(UpperCamelCase__ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__lowerCamelCase = prime_fac_a.count(UpperCamelCase__ )
for _ in range(UpperCamelCase__ ):
ans *= n
done.append(UpperCamelCase__ )
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def lowerCamelCase_ ( UpperCamelCase__ : Dict ) -> str:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n >= 0), "'number' must been a positive int"
__lowerCamelCase = 0
__lowerCamelCase = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
        # if ans is not prime then
        # advance to the next prime number.
while not is_prime(UpperCamelCase__ ):
ans += 1
# precondition
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and is_prime(
UpperCamelCase__ ), "'ans' must been a prime number and from type int"
return ans
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : Tuple ) -> Tuple:
"""simple docstring"""
assert (
is_prime(UpperCamelCase__ ) and is_prime(UpperCamelCase__ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__lowerCamelCase = p_number_a + 1 # jump to the next number
    __lowerCamelCase = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(UpperCamelCase__ ):
number += 1
while number < p_number_a:
ans.append(UpperCamelCase__ )
number += 1
# fetch the next prime number.
while not is_prime(UpperCamelCase__ ):
number += 1
# precondition
assert (
isinstance(UpperCamelCase__ , UpperCamelCase__ )
and ans[0] != p_number_a
and ans[len(UpperCamelCase__ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
    # 'ans' contains neither 'pNumber1' nor 'pNumber2'!
return ans
def lowerCamelCase_ ( UpperCamelCase__ : int ) -> Optional[Any]:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n >= 1), "'n' must been int and >= 1"
__lowerCamelCase = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(UpperCamelCase__ )
# precondition
    assert ans[0] == 1 and ans[len(UpperCamelCase__ ) - 1] == n, "Error in function get_divisors(...)"
return ans
def lowerCamelCase_ ( UpperCamelCase__ : Dict ) -> Optional[int]:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (
number > 1
), "'number' must been an int and >= 1"
__lowerCamelCase = get_divisors(UpperCamelCase__ )
# precondition
assert (
isinstance(UpperCamelCase__ , UpperCamelCase__ )
and (divisors[0] == 1)
and (divisors[len(UpperCamelCase__ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] ) -> List[Any]:
"""simple docstring"""
assert (
isinstance(UpperCamelCase__ , UpperCamelCase__ )
and isinstance(UpperCamelCase__ , UpperCamelCase__ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
__lowerCamelCase = gcd(abs(UpperCamelCase__ ) , abs(UpperCamelCase__ ) )
# precondition
assert (
isinstance(UpperCamelCase__ , UpperCamelCase__ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCamelCase_ ( UpperCamelCase__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n >= 0), "'n' must been a int and >= 0"
__lowerCamelCase = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n >= 0), "'n' must been an int and >= 0"
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 1 # this will be return
for _ in range(n - 1 ):
__lowerCamelCase = ans
ans += fiba
__lowerCamelCase = tmp
return ans
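# A minimal, self-contained sanity check added for illustration (it does not
# call the mangled names above): the identity gcd(a, b) * lcm(a, b) == a * b
# ties together what the gcd and kg_v (lcm) routines in this module compute.
from math import gcd as _math_gcd


def _lcm(a: int, b: int) -> int:
    return a * b // _math_gcd(a, b)


assert _math_gcd(24, 36) == 12
assert _lcm(24, 36) == 72
assert _math_gcd(24, 36) * _lcm(24, 36) == 24 * 36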
| 90 |
import requests
from bsa import BeautifulSoup
def UpperCAmelCase_ ( __snake_case = "https://www.worldometers.info/coronavirus" ) -> dict:
"""simple docstring"""
_lowercase =BeautifulSoup(requests.get(__snake_case ).text , '''html.parser''' )
_lowercase =soup.findAll('''h1''' )
_lowercase =soup.findAll('''div''' , {'''class''': '''maincounter-number'''} )
keys += soup.findAll('''span''' , {'''class''': '''panel-title'''} )
values += soup.findAll('''div''' , {'''class''': '''number-table-main'''} )
return {key.text.strip(): value.text.strip() for key, value in zip(__snake_case , __snake_case )}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
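# The returned dict pairs each scraped heading with its counter, roughly:
#   {"Coronavirus Cases:": "...", "Deaths:": "...", "Recovered:": "..."}
# (key names assumed from the worldometers page layout; values change live).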
| 5 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_activation('''gelu''')
self.assertTrue(torch.allclose(gelu_python(lowercase_) , torch_builtin(lowercase_)))
self.assertFalse(torch.allclose(gelu_python(lowercase_) , gelu_new(lowercase_)))
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
SCREAMING_SNAKE_CASE_ : Tuple = get_activation('''gelu''')
SCREAMING_SNAKE_CASE_ : Optional[Any] = get_activation('''gelu_10''')
SCREAMING_SNAKE_CASE_ : str = torch_builtin(lowercase_)
SCREAMING_SNAKE_CASE_ : int = geluaa(lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = torch.where(y_gelu_aa < 10.0 , 1 , 0)
self.assertTrue(torch.max(lowercase_).item() == 10.0)
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask))
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
get_activation('''gelu''')
get_activation('''gelu_10''')
get_activation('''gelu_fast''')
get_activation('''gelu_new''')
get_activation('''gelu_python''')
get_activation('''gelu_pytorch_tanh''')
get_activation('''linear''')
get_activation('''mish''')
get_activation('''quick_gelu''')
get_activation('''relu''')
get_activation('''sigmoid''')
get_activation('''silu''')
get_activation('''swish''')
get_activation('''tanh''')
with self.assertRaises(lowercase_):
get_activation('''bogus''')
with self.assertRaises(lowercase_):
get_activation(lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = get_activation('''gelu''')
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1
SCREAMING_SNAKE_CASE_ : Dict = get_activation('''gelu''')
self.assertEqual(acta.a , 1)
with self.assertRaises(lowercase_):
SCREAMING_SNAKE_CASE_ : Any = acta.a
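# A rough sketch (an assumption, not transformers' actual source) of what the
# "gelu_10" activation exercised above computes: standard GELU with its output
# clipped to [-10, 10], which is why torch.max(y_gelu_10) == 10.0 in the test.
import torch
import torch.nn.functional as F


def gelu_clipped(x: torch.Tensor, lo: float = -10.0, hi: float = 10.0) -> torch.Tensor:
    # clamp the GELU output into [lo, hi]
    return torch.clamp(F.gelu(x), min=lo, max=hi)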
| 91 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
UpperCAmelCase__ = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
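# Usage sketch: attribute access on the lazy module triggers the real import,
# so `from transformers.onnx import OnnxConfig, export` only loads the config
# and convert submodules at that point (names taken from _import_structure).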
| 5 | 0 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( snake_case__ , unittest.TestCase ):
_a : Optional[Any] = GPTaTokenizer
_a : Dict = GPTaTokenizerFast
_a : Tuple = True
_a : List[Any] = {"""add_prefix_space""": True}
_a : Optional[int] = False
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCAmelCase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
__lowerCAmelCase = dict(zip(_A , range(len(_A ) ) ) )
__lowerCAmelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
__lowerCAmelCase = {"unk_token": "<unk>"}
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_A ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_A ) )
def __SCREAMING_SNAKE_CASE( self , **_A ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **_A )
def __SCREAMING_SNAKE_CASE( self , **_A ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **_A )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = "lower newer"
__lowerCAmelCase = "lower newer"
return input_text, output_text
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowerCAmelCase = "lower newer"
__lowerCAmelCase = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
__lowerCAmelCase = tokenizer.tokenize(_A , add_prefix_space=_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokens + [tokenizer.unk_token]
__lowerCAmelCase = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer(add_prefix_space=_A )
__lowerCAmelCase = "lower newer"
# Testing tokenization
__lowerCAmelCase = tokenizer.tokenize(_A , add_prefix_space=_A )
__lowerCAmelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
# Testing conversion to ids without special tokens
__lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A , add_prefix_space=_A )
__lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
# Testing conversion to ids with special tokens
__lowerCAmelCase = self.get_rust_tokenizer(add_prefix_space=_A )
__lowerCAmelCase = tokenizer.encode(_A , add_prefix_space=_A )
__lowerCAmelCase = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
# Testing the unknown token
__lowerCAmelCase = tokens + [rust_tokenizer.unk_token]
__lowerCAmelCase = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_A ) , _A )
def __SCREAMING_SNAKE_CASE( self , *_A , **_A ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self , _A=1_5 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(_A , **_A )
# Simple input
__lowerCAmelCase = "This is a simple input"
__lowerCAmelCase = ["This is a simple input 1", "This is a simple input 2"]
__lowerCAmelCase = ("This is a simple input", "This is a pair")
__lowerCAmelCase = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding="max_length" )
# Simple input
self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding="max_length" )
# Simple input
self.assertRaises(
_A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding="max_length" , )
# Pair input
self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding="max_length" )
# Pair input
self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding="max_length" )
# Pair input
self.assertRaises(
_A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding="max_length" , )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
__lowerCAmelCase = "This is a simple input"
__lowerCAmelCase = ["This is a simple input looooooooong", "This is a simple input"]
__lowerCAmelCase = ("This is a simple input", "This is a pair")
__lowerCAmelCase = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
__lowerCAmelCase = tokenizer.pad_token_id
__lowerCAmelCase = tokenizer(_A , padding="max_length" , max_length=3_0 , return_tensors="np" )
__lowerCAmelCase = tokenizer(_A , padding=_A , truncate=_A , return_tensors="np" )
__lowerCAmelCase = tokenizer(*_A , padding="max_length" , max_length=6_0 , return_tensors="np" )
__lowerCAmelCase = tokenizer(_A , padding=_A , truncate=_A , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 3_0 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 6_0 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "$$$"
__lowerCAmelCase = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=_A , add_bos_token=_A )
__lowerCAmelCase = "This is a simple input"
__lowerCAmelCase = ["This is a simple input 1", "This is a simple input 2"]
__lowerCAmelCase = tokenizer.bos_token_id
__lowerCAmelCase = tokenizer(_A )
__lowerCAmelCase = tokenizer(_A )
self.assertEqual(out_s.input_ids[0] , _A )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__lowerCAmelCase = tokenizer.decode(out_s.input_ids )
__lowerCAmelCase = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , _A )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = [self.get_tokenizer(do_lower_case=_A , add_bos_token=_A )]
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
__lowerCAmelCase = "Encode this."
__lowerCAmelCase = "This one too please."
__lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A )
encoded_sequence += tokenizer.encode(_A , add_special_tokens=_A )
__lowerCAmelCase = tokenizer.encode_plus(
_A , _A , add_special_tokens=_A , return_special_tokens_mask=_A , )
__lowerCAmelCase = encoded_sequence_dict["input_ids"]
__lowerCAmelCase = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(_A ) , len(_A ) )
__lowerCAmelCase = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(_A )
]
__lowerCAmelCase = [x for x in filtered_sequence if x is not None]
self.assertEqual(_A , _A )
@require_tokenizers
class a__ ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=_A )
__lowerCAmelCase = "A photo of a cat"
__lowerCAmelCase = tokenizer.encode(
_A , )
self.assertEqual(_A , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained("test_opt" )
__lowerCAmelCase = AutoTokenizer.from_pretrained("./test_opt" )
__lowerCAmelCase = tokenizer.encode(
_A , )
self.assertEqual(_A , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=_A )
__lowerCAmelCase = "A photo of a cat"
__lowerCAmelCase = tokenizer.encode(
_A , )
# Same as above
self.assertEqual(_A , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=_A )
__lowerCAmelCase = "bos"
__lowerCAmelCase = tokenizer.get_vocab()["bos"]
__lowerCAmelCase = "A photo of a cat"
__lowerCAmelCase = tokenizer.encode(
_A , )
# We changed the bos token
self.assertEqual(_A , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained("./tok" )
__lowerCAmelCase = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
__lowerCAmelCase = tokenizer.encode(
_A , )
self.assertEqual(_A , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
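# Self-contained sketch of the toy BPE exercised above. Assumption: the
# mangled "GPTaTokenizer" is transformers' GPT2Tokenizer. "\u0120" marks a
# leading space; each merge rule fuses one ranked symbol pair.
import json
import os
import tempfile

from transformers import GPT2Tokenizer

vocab_tokens = [
    "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l",
    "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer",
    "\u0120wider", "<unk>", "<|endoftext|>",
]
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
tmp = tempfile.mkdtemp()
with open(os.path.join(tmp, "vocab.json"), "w", encoding="utf-8") as f:
    json.dump({tok: i for i, tok in enumerate(vocab_tokens)}, f)
with open(os.path.join(tmp, "merges.txt"), "w", encoding="utf-8") as f:
    f.write("\n".join(merges))
tok = GPT2Tokenizer(
    os.path.join(tmp, "vocab.json"), os.path.join(tmp, "merges.txt"), unk_token="<unk>"
)
# " lower" -> "Ġlow" + "er"; " newer" only gets the "e r" merge: Ġ n e w er
assert tok.tokenize("lower newer", add_prefix_space=True) == [
    "\u0120low", "er", "\u0120", "n", "e", "w", "er",
]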
| 92 |
def UpperCAmelCase_ ( __snake_case ) -> str:
"""simple docstring"""
_lowercase =0
# if input_string is "aba" than new_input_string become "a|b|a"
_lowercase =''''''
_lowercase =''''''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(__snake_case ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
_lowercase , _lowercase =0, 0
# length[i] shows the length of palindromic substring with center i
_lowercase =[1 for i in range(len(__snake_case ) )]
# for each character in new_string find corresponding palindromic string
_lowercase =0
for j in range(len(__snake_case ) ):
_lowercase =1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(__snake_case )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_lowercase =2 * k - 1
# does this string is ending after the previously explored end (that is r) ?
# if yes the update the new r to the last index of this
if j + k - 1 > r:
_lowercase =j - k + 1 # noqa: E741
_lowercase =j + k - 1
# update max_length and start position
if max_length < length[j]:
_lowercase =length[j]
_lowercase =j
# create that string
_lowercase =new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
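# A brute-force cross-check of the specification the Manacher routine above
# targets (quadratic-plus, so for small inputs only; the helper name is ours):
def longest_palindrome_bruteforce(s: str) -> str:
    best = ""
    for i in range(len(s)):
        for j in range(i, len(s)):
            sub = s[i : j + 1]
            if sub == sub[::-1] and len(sub) > len(best):
                best = sub
    return best


assert longest_palindrome_bruteforce("abab") == "aba"
assert longest_palindrome_bruteforce("forgeeksskeegfor") == "geeksskeeg"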
| 5 | 0 |
'''simple docstring'''
import torch
def snake_case_ ( ):
"""simple docstring"""
if torch.cuda.is_available():
lowercase_ : int = torch.cuda.device_count()
else:
lowercase_ : Optional[int] = 0
print(F'''Successfully ran on {num_gpus} GPUs''' )
if __name__ == "__main__":
main()
| 93 |
from math import isqrt
def UpperCAmelCase_ ( __snake_case ) -> list[int]:
"""simple docstring"""
_lowercase =[True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , __snake_case , __snake_case ):
_lowercase =False
return [i for i in range(2 , __snake_case ) if is_prime[i]]
def UpperCAmelCase_ ( __snake_case = 10**8 ) -> int:
"""simple docstring"""
_lowercase =calculate_prime_numbers(max_number // 2 )
_lowercase =0
_lowercase =0
_lowercase =len(__snake_case ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
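# Worked example of the two-pointer count above with max_number = 30:
# primes below 15 are [2, 3, 5, 7, 11, 13].
#   left at 2:  right stays on 13 (2 * 13 = 26 < 30)  -> 6 products
#   left at 3:  right drops to 7  (3 * 7  = 21 < 30)  -> 3 products
#   left at 5:  right drops to 5  (5 * 5  = 25 < 30)  -> 1 product
# total: 10 semiprimes below 30, i.e. {4, 6, 9, 10, 14, 15, 21, 22, 25, 26}.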
| 5 | 0 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _snake_case :
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=0.2 , _lowerCamelCase=0.2 ):
a :Tuple = bp_numa
a :Tuple = bp_numa
a :int = bp_numa
a :Optional[Any] = conva_get[:2]
a :Dict = conva_get[2]
a :str = size_pa
a :str = rate_w
a :List[str] = rate_t
a :Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
a :Any = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
a :Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
a :Tuple = -2 * np.random.rand(self.conva[1] ) + 1
a :Any = -2 * np.random.rand(self.num_bpa ) + 1
a :Optional[int] = -2 * np.random.rand(self.num_bpa ) + 1
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
# save model dict with pickle
a :Any = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(_lowerCamelCase , '''wb''' ) as f:
pickle.dump(_lowerCamelCase , _lowerCamelCase )
print(F'''Model saved: {save_path}''' )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , _lowerCamelCase ):
# read saved model
with open(_lowerCamelCase , '''rb''' ) as f:
a :str = pickle.load(_lowerCamelCase ) # noqa: S301
a :List[str] = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
a :Optional[int] = model_dic.get('''size_pooling1''' )
a :str = model_dic.get('''num_bp1''' )
a :List[Any] = model_dic.get('''num_bp2''' )
a :Dict = model_dic.get('''num_bp3''' )
a :str = model_dic.get('''rate_weight''' )
a :str = model_dic.get('''rate_thre''' )
# create model instance
a :Optional[Any] = CNN(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# modify model parameter
a :Optional[Any] = model_dic.get('''w_conv1''' )
a :List[str] = model_dic.get('''wkj''' )
a :List[Any] = model_dic.get('''vji''' )
a :Optional[int] = model_dic.get('''thre_conv1''' )
a :Any = model_dic.get('''thre_bp2''' )
a :Optional[int] = model_dic.get('''thre_bp3''' )
return conv_ins
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
return 1 / (1 + np.exp(-1 * x ))
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
return round(_lowerCamelCase , 3 )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
# convolution process
a :Dict = convs[0]
a :Optional[Any] = convs[1]
a :Union[str, Any] = np.shape(_lowerCamelCase )[0]
# get the data slice of original image data, data_focus
a :List[Any] = []
for i_focus in range(0 , size_data - size_conv + 1 , _lowerCamelCase ):
for j_focus in range(0 , size_data - size_conv + 1 , _lowerCamelCase ):
a :Union[str, Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_lowerCamelCase )
        # calculate the feature map of every single kernel, saved as a list of matrices
a :int = []
a :List[Any] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(_lowerCamelCase ):
a :Tuple = []
for i_focus in range(len(_lowerCamelCase ) ):
a :str = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(_lowerCamelCase ) )
a :str = np.asmatrix(_lowerCamelCase ).reshape(
_lowerCamelCase , _lowerCamelCase )
data_featuremap.append(_lowerCamelCase )
        # expanding the data slice to one dimension
a :Any = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(_lowerCamelCase ) )
a :Any = np.asarray(_lowerCamelCase )
return focus_list, data_featuremap
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="average_pool" ):
# pooling process
a :Any = len(featuremaps[0] )
a :List[str] = int(size_map / size_pooling )
a :List[str] = []
for i_map in range(len(_lowerCamelCase ) ):
a :Optional[int] = featuremaps[i_map]
a :str = []
for i_focus in range(0 , _lowerCamelCase , _lowerCamelCase ):
for j_focus in range(0 , _lowerCamelCase , _lowerCamelCase ):
a :Union[str, Any] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_lowerCamelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_lowerCamelCase ) )
a :Dict = np.asmatrix(_lowerCamelCase ).reshape(_lowerCamelCase , _lowerCamelCase )
featuremap_pooled.append(_lowerCamelCase )
return featuremap_pooled
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
        # expanding three-dimensional data to a one-dimensional list
a :Optional[Any] = []
for i in range(len(_lowerCamelCase ) ):
a :Optional[int] = np.shape(data[i] )
a :Tuple = data[i].reshape(1 , shapes[0] * shapes[1] )
a :List[Any] = data_listed.getA().tolist()[0]
data_expanded.extend(_lowerCamelCase )
a :List[Any] = np.asarray(_lowerCamelCase )
return data_expanded
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
        # expanding a matrix to a one-dimensional list
a :Optional[Any] = np.asarray(_lowerCamelCase )
a :Any = np.shape(_lowerCamelCase )
a :Optional[Any] = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
a :Tuple = []
a :List[str] = 0
for i_map in range(_lowerCamelCase ):
a :List[str] = np.ones((size_map, size_map) )
for i in range(0 , _lowerCamelCase , _lowerCamelCase ):
for j in range(0 , _lowerCamelCase , _lowerCamelCase ):
a :Optional[int] = pd_pool[
i_pool
]
a :int = i_pool + 1
a :Optional[Any] = np.multiply(
_lowerCamelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(_lowerCamelCase )
return pd_all
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=bool ):
        # model training
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(_lowerCamelCase )) )
print((''' - - Shape: Teach_Data ''', np.shape(_lowerCamelCase )) )
a :Union[str, Any] = 0
a :List[Any] = []
a :Optional[Any] = 1_0000
while rp < n_repeat and mse >= error_accuracy:
a :Optional[Any] = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(_lowerCamelCase ) ):
# print('------------Learning Image: %d--------------'%p)
a :List[Any] = np.asmatrix(datas_train[p] )
a :int = np.asarray(datas_teach[p] )
a , a :Union[str, Any] = self.convolute(
_lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a :Optional[int] = self.pooling(_lowerCamelCase , self.size_poolinga )
a :Optional[int] = np.shape(_lowerCamelCase )
a :List[str] = self._expand(_lowerCamelCase )
a :Tuple = data_bp_input
a :str = np.dot(_lowerCamelCase , self.vji.T ) - self.thre_bpa
a :Optional[Any] = self.sig(_lowerCamelCase )
a :str = np.dot(_lowerCamelCase , self.wkj.T ) - self.thre_bpa
a :Union[str, Any] = self.sig(_lowerCamelCase )
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
a :Any = np.multiply(
(data_teach - bp_outa) , np.multiply(_lowerCamelCase , (1 - bp_outa) ) )
a :str = np.multiply(
np.dot(_lowerCamelCase , self.wkj ) , np.multiply(_lowerCamelCase , (1 - bp_outa) ) )
a :int = np.dot(_lowerCamelCase , self.vji )
a :Any = pd_i_all / (self.size_poolinga * self.size_poolinga)
a :List[str] = pd_conva_pooled.T.getA().tolist()
a :Optional[int] = self._calculate_gradient_from_pool(
_lowerCamelCase , _lowerCamelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
a :Optional[int] = self._expand_mat(pd_conva_all[k_conv] )
a :List[Any] = self.rate_weight * np.dot(_lowerCamelCase , _lowerCamelCase )
a :Union[str, Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
a :Any = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
a :str = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
a :List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
a :Dict = self.thre_bpa - pd_k_all * self.rate_thre
a :Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
a :Union[str, Any] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
a :List[str] = rp + 1
a :List[str] = error_count / patterns
all_mse.append(_lowerCamelCase )
def draw_error():
a :Optional[Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(_lowerCamelCase , '''+-''' )
plt.plot(_lowerCamelCase , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(_lowerCamelCase , alpha=0.5 )
plt.show()
        print('''------------------Training Completed---------------------''' )
print((''' - - Training epoch: ''', rp, F''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
# model predict
a :Any = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(_lowerCamelCase )) )
for p in range(len(_lowerCamelCase ) ):
a :Dict = np.asmatrix(datas_test[p] )
a , a :Union[str, Any] = self.convolute(
_lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a :Optional[int] = self.pooling(_lowerCamelCase , self.size_poolinga )
a :int = self._expand(_lowerCamelCase )
a :Optional[int] = data_bp_input
a :Dict = bp_outa * self.vji.T - self.thre_bpa
a :List[Any] = self.sig(_lowerCamelCase )
a :Optional[int] = bp_outa * self.wkj.T - self.thre_bpa
a :Tuple = self.sig(_lowerCamelCase )
produce_out.extend(bp_outa.getA().tolist() )
a :Optional[Any] = [list(map(self.do_round , _lowerCamelCase ) ) for each in produce_out]
return np.asarray(_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
        # return the image data after the convolution process so we can inspect it
a :Union[str, Any] = np.asmatrix(_lowerCamelCase )
a , a :str = self.convolute(
_lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a :str = self.pooling(_lowerCamelCase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
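# Worked example of the feature-map sizing inside convolute()/pooling() above:
#   size_data = 28, size_conv = 3, conv_step = 1
#   -> (28 - 3) / 1 + 1 = 26, i.e. one 26x26 feature map per kernel;
#   average pooling with size_pooling1 = 2 then yields 13x13 maps.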
| 94 |
# fmt: off
UpperCAmelCase__ = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
UpperCAmelCase__ = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCAmelCase_ ( __snake_case ) -> str:
"""simple docstring"""
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def UpperCAmelCase_ ( __snake_case ) -> str:
"""simple docstring"""
return "".join(REVERSE_DICT[char] for char in message.split() )
def UpperCAmelCase_ ( ) -> None:
"""simple docstring"""
_lowercase ='''Morse code here!'''
print(__snake_case )
_lowercase =encrypt(__snake_case )
print(__snake_case )
_lowercase =decrypt(__snake_case )
print(__snake_case )
if __name__ == "__main__":
main()
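# Round-trip sketch (the function names above are mangled; per the dict,
# S = "..." and O = "---"):
#   encrypt("SOS")          -> "... --- ..."
#   decrypt("... --- ...")  -> "SOS"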
| 5 | 0 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
UpperCAmelCase : int = logging.get_logger(__name__)
UpperCAmelCase : Any = """Hello, World!"""
UpperCAmelCase : int = """en_XX"""
def _A ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : bool ):
"""simple docstring"""
a__ : Any =Path("data_bin" )
a__ : Optional[Any] =FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(SCREAMING_SNAKE_CASE ).parent ) , checkpoint_file=Path(SCREAMING_SNAKE_CASE ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(SCREAMING_SNAKE_CASE ) , bpe="sentencepiece" , sentencepiece_model=str(Path(SCREAMING_SNAKE_CASE ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
xmod.eval() # disable dropout
print(SCREAMING_SNAKE_CASE )
a__ : int =xmod.model.encoder.sentence_encoder
a__ : Any =XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
a__ : Union[str, Any] =xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our X-MOD config:" , SCREAMING_SNAKE_CASE )
a__ : str =XmodForSequenceClassification(SCREAMING_SNAKE_CASE ) if classification_head else XmodForMaskedLM(SCREAMING_SNAKE_CASE )
model.eval()
# Now let's copy all the weights.
# Embeddings
a__ : Tuple =xmod_sent_encoder.embed_tokens.weight
a__ : int =xmod_sent_encoder.embed_positions.weight
a__ : List[str] =torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
a__ : Tuple =xmod_sent_encoder.layernorm_embedding.weight
a__ : Any =xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
a__ : List[Any] =model.roberta.encoder.layer[i]
a__ : str =xmod_sent_encoder.layers[i]
# self attention
a__ : Union[str, Any] =layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
a__ : Any =xmod_layer.self_attn.q_proj.weight
a__ : Optional[Any] =xmod_layer.self_attn.q_proj.bias
a__ : Optional[int] =xmod_layer.self_attn.k_proj.weight
a__ : Optional[int] =xmod_layer.self_attn.k_proj.bias
a__ : Any =xmod_layer.self_attn.v_proj.weight
a__ : List[str] =xmod_layer.self_attn.v_proj.bias
# self-attention output
a__ : Union[str, Any] =layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
a__ : Any =xmod_layer.self_attn.out_proj.weight
a__ : str =xmod_layer.self_attn.out_proj.bias
a__ : Dict =xmod_layer.self_attn_layer_norm.weight
a__ : Any =xmod_layer.self_attn_layer_norm.bias
# intermediate
a__ : List[Any] =layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of intermediate weights do not match." )
a__ : Any =xmod_layer.fca.weight
a__ : str =xmod_layer.fca.bias
# output
a__ : Union[str, Any] =layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of feed-forward weights do not match." )
a__ : int =xmod_layer.fca.weight
a__ : str =xmod_layer.fca.bias
a__ : str =xmod_layer.final_layer_norm.weight
a__ : Optional[Any] =xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
a__ : Union[str, Any] =xmod_layer.adapter_layer_norm.weight
a__ : List[str] =xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
for lang_code, adapter in xmod_layer.adapter_modules.items():
a__ : int =bert_output.adapter_modules[lang_code]
a__ : List[Any] =xmod_layer.adapter_modules[lang_code]
a__ : List[str] =from_adapter.fca.weight
a__ : List[Any] =from_adapter.fca.bias
a__ : Optional[int] =from_adapter.fca.weight
a__ : List[Any] =from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
a__ : Any =xmod_sent_encoder.layer_norm.weight
a__ : Tuple =xmod_sent_encoder.layer_norm.bias
if classification_head:
a__ : int =xmod.model.classification_heads["mnli"].dense.weight
a__ : Union[str, Any] =xmod.model.classification_heads["mnli"].dense.bias
a__ : List[str] =xmod.model.classification_heads["mnli"].out_proj.weight
a__ : Any =xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
a__ : Optional[Any] =xmod.model.encoder.lm_head.dense.weight
a__ : Dict =xmod.model.encoder.lm_head.dense.bias
a__ : List[str] =xmod.model.encoder.lm_head.layer_norm.weight
a__ : Any =xmod.model.encoder.lm_head.layer_norm.bias
a__ : Dict =xmod.model.encoder.lm_head.weight
a__ : Optional[int] =xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
a__ : Tuple =xmod.encode(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(SCREAMING_SNAKE_CASE )
a__ : List[Any] =model(SCREAMING_SNAKE_CASE )[0]
if classification_head:
a__ : Optional[int] =xmod.model.classification_heads["mnli"](xmod.extract_features(SCREAMING_SNAKE_CASE ) )
else:
a__ : Any =xmod.model(SCREAMING_SNAKE_CASE , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
a__ : Any =torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
a__ : List[str] =torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
Path(SCREAMING_SNAKE_CASE ).mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
UpperCAmelCase : List[Any] = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
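# Example invocation (hypothetical script name and paths; flags as defined by
# the parser above):
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path ./xmod.base/model.pt \
#       --pytorch_dump_folder_path ./xmod-base
# Add --classification_head when the checkpoint carries an MNLI head.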
| 95 |
from typing import Any
def viterbi( observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ) -> list:
    """simple docstring"""
    _validation(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    # Creates data structures and fill initial step
    probabilities = {}
    pointers = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1 , len(observations_space ) ):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ''''''
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space ) - 1]
    # argmax for given final observation
    arg_max = ''''''
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space ) - 1 , -1 , -1 ):
        result.append(previous )
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation( observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ) -> None:
    """simple docstring"""
    _validate_not_empty(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    _validate_lists(observations_space , states_space )
    _validate_dicts(
        initial_probabilities , transition_probabilities , emission_probabilities )
def _validate_not_empty( observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ) -> None:
"""simple docstring"""
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def _validate_lists( observations_space , states_space ) -> None:
    """simple docstring"""
    _validate_list(observations_space , '''observations_space''' )
    _validate_list(states_space , '''states_space''' )
def _validate_list( _object , var_name ) -> None:
    """simple docstring"""
    if not isinstance(_object , list ):
        msg = F"{var_name} must be a list"
        raise ValueError(msg )
    else:
        for x in _object:
            if not isinstance(x , str ):
                msg = F"{var_name} must be a list of strings"
                raise ValueError(msg )
def _validate_dicts( initial_probabilities , transition_probabilities , emission_probabilities , ) -> None:
    """simple docstring"""
    _validate_dict(initial_probabilities , '''initial_probabilities''' , float )
    _validate_nested_dict(transition_probabilities , '''transition_probabilities''' )
    _validate_nested_dict(emission_probabilities , '''emission_probabilities''' )
def _validate_nested_dict( _object , var_name ) -> None:
    """simple docstring"""
    _validate_dict(_object , var_name , dict )
    for x in _object.values():
        _validate_dict(x , var_name , float , True )
def _validate_dict( _object , var_name , value_type , nested = False ) -> None:
    """simple docstring"""
    if not isinstance(_object , dict ):
        msg = F"{var_name} must be a dict"
        raise ValueError(msg )
    if not all(isinstance(x , str ) for x in _object ):
        msg = F"{var_name} all keys must be strings"
        raise ValueError(msg )
    if not all(isinstance(x , value_type ) for x in _object.values() ):
        nested_text = '''nested dictionary ''' if nested else ''''''
        msg = F"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg )
if __name__ == "__main__":
from doctest import testmod
testmod()
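# A minimal, hedged usage sketch: the classic healthy/fever hidden Markov
# model. The dictionaries below are illustrative values, not part of the
# original module.
if __name__ == "__main__":
    observations = ["normal", "cold", "dizzy"]
    states = ["Healthy", "Fever"]
    start_p = {"Healthy": 0.6, "Fever": 0.4}
    trans_p = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    emit_p = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    # Expected most likely state sequence: ['Healthy', 'Healthy', 'Fever']
    print(viterbi(observations, states, start_p, trans_p, emit_p))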
| 5 | 0 |
"""simple docstring"""
def and_gate( input_a , input_b ):
    return int((input_a, input_b).count(0 ) == 0 )
def test_and_gate( ):
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1)) | 96 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig( PretrainedConfig):
    model_type = '''esm'''
    def __init__(self , vocab_size=None , mask_token_id=None , pad_token_id=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1026 , initializer_range=0.02 , layer_norm_eps=1e-12 , position_embedding_type="absolute" , use_cache=True , emb_layer_norm_before=None , token_dropout=False , is_folding_model=False , esmfold_config=None , vocab_list=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('''No esmfold_config supplied for folding model, using default values.''' )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config , dict ):
                esmfold_config = EsmFoldConfig(**esmfold_config )
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , False ):
            raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )

    def to_dict(self ):
        output = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig ):
            output['''esmfold_config'''] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None
    def __post_init__(self ):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict ):
            self.trunk = TrunkConfig(**self.trunk )

    def to_dict(self ):
        output = asdict(self )
        output['''trunk'''] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None
    def __post_init__(self ):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module , dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}." )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                f" {self.sequence_state_dim} and {self.sequence_head_width}." )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}." )

    def to_dict(self ):
        output = asdict(self )
        output['''structure_module'''] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5
    def to_dict(self ):
        return asdict(self )
def get_default_vocab_list( ) -> Tuple:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
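# A minimal usage sketch (the numbers below are illustrative, not an official
# checkpoint configuration): build a small ESM-style config and round-trip it
# through to_dict().
#   config = EsmConfig(vocab_size=33, hidden_size=320, num_hidden_layers=6,
#                      num_attention_heads=20, intermediate_size=1280)
#   assert config.to_dict()["hidden_size"] == 320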
| 5 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments( BenchmarkArguments ):
    """simple docstring"""
    deprecated_args = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
    def __init__( self , **kwargs ):
        '''simple docstring'''
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg )
                logger.warning(
                    F'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or'''
                    F''' {positive_arg}={kwargs[positive_arg]}''' )
        self.tpu_name = kwargs.pop('''tpu_name''' , self.tpu_name )
        self.device_idx = kwargs.pop('''device_idx''' , self.device_idx )
        self.eager_mode = kwargs.pop('''eager_mode''' , self.eager_mode )
        self.use_xla = kwargs.pop('''use_xla''' , self.use_xla )
        super().__init__(**kwargs )
    tpu_name: str = field(
        default=None , metadata={'help': 'Name of TPU'} , )
    device_idx: int = field(
        default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , )
    eager_mode: bool = field(default=False , metadata={'help': 'Benchmark models in eager model.'} )
    use_xla: bool = field(
        default=False , metadata={
            'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'
        } , )
@cached_property
    def _setup_tpu( self ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
return tpu
@cached_property
    def _setup_strategy( self ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            strategy = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , '''GPU''' )
                strategy = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' )
else:
tf.config.set_visible_devices([] , '''GPU''' ) # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' )
return strategy
@property
    def is_tpu( self ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
return self._setup_tpu is not None
@property
    def strategy( self ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
return self._setup_strategy
@property
    def gpu_list( self ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
return tf.config.list_physical_devices('''GPU''' )
@property
    def n_gpu( self ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
    def is_gpu( self ):
        '''simple docstring'''
        return self.n_gpu > 0
| 97 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
class TokenizedDataset( IterableDataset):
    def __init__(self , tokenizer , dataset , n_tasks=None , n_copies=1 ):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
        self.n_copies = n_copies
    def __iter__(self ):
        prompts = []
        for task in range(self.n_tasks ):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
        outputs = self.tokenizer(prompts , padding=True , return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class EndOfFunctionCriteria( StoppingCriteria):
    def __init__(self , start_length , eof_strings , tokenizer ):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self , input_ids , scores , **kwargs ):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(done )
def remove_last_block( string ):
    """simple docstring"""
    string_list = re.split('''(%s)''' % '''|'''.join(EOF_STRINGS ) , string )
    # last string should be ""
    return "".join(string_list[:-2] )
def complete_code( accelerator , model , tokenizer , dataloader , n_tasks , batch_size=20 , **gen_kwargs ):
    """simple docstring"""
    gen_token_dict = defaultdict(list )  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader ) ):
        with torch.no_grad():
            gen_kwargs['''stopping_criteria'''][0].start_length = batch['''ids'''].shape[-1]
            generated_tokens = accelerator.unwrap_model(model ).generate(
                input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=batch_size , **gen_kwargs )
            # each task is generated batch_size times
            generated_tasks = batch['''task_id'''].repeat(batch_size )
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens , dim=1 , pad_index=tokenizer.pad_token_id )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks) )
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks , generated_tokens ):
                gen_token_dict[task].append(generated_tokens )
    code_gens = [[] for _ in range(n_tasks )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s , skip_special_tokens=True , clean_up_tokenization_spaces=True )
            code_gens[task].append(remove_last_block(gen_code ) )
    return code_gens
def main():
    """simple docstring"""
    parser = HfArgumentParser(HumanEvalArguments )
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ['''HF_ALLOW_CODE_EVAL'''] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ['''TOKENIZERS_PARALLELISM'''] = '''false'''
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed , device_specific=True )
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
    # Generation settings
    gen_kwargs = {
        '''do_sample''': args.do_sample,
        '''temperature''': args.temperature,
        '''max_new_tokens''': args.max_new_tokens,
        '''top_p''': args.top_p,
        '''top_k''': args.top_k,
        '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , EOF_STRINGS , tokenizer )] ),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset('''openai_humaneval''' )
    code_eval_metric = load_metric('''code_eval''' )
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer , human_eval['''test'''] , n_copies=n_copies , n_tasks=n_tasks )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized , batch_size=1 )
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
    except ValueError as exception:
        print(
            '''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
            ''' flag to enable code evaluation.''' )
        raise exception
    model, human_eval_loader = accelerator.prepare(model , human_eval_loader )
    code_gens = complete_code(
        accelerator , model , tokenizer , human_eval_loader , n_tasks=n_tasks , batch_size=args.batch_size , **gen_kwargs , )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks ) ):
            test_func = human_eval['''test'''][task]['''test''']
            entry_point = F"check({human_eval['test'][task]['entry_point']})"
            references.append('''\n''' + test_func + '''\n''' + entry_point )
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references , predictions=code_gens , num_workers=args.num_workers )
        print(F"Results: {pass_at_k}" )
        # Save results to json file
        with open(args.output_file , '''w''' ) as fp:
            json.dump(pass_at_k , fp )
    # For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
    # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
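# Example invocation (the script name and model checkpoint are hypothetical;
# the flags map to fields of arguments.HumanEvalArguments):
#   accelerate launch human_eval.py --model_ckpt codeparrot/codeparrot-small \
#       --do_sample True --temperature 0.2 --n_samples 200 --batch_size 10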
| 5 | 0 |
"""simple docstring"""
def generate_large_matrix():
    return [list(range(1_0_0_0 - i , -1_0_0_0 - i , -1 ) ) for i in range(1_0_0_0 )]
grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid( grid ):
    assert all(row == sorted(row , reverse=True ) for row in grid )
    assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def find_negative_index( array ):
    left = 0
    right = len(array ) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array )
def count_negatives_binary_search( grid ):
    total = 0
    bound = len(grid[0] )
    for i in range(len(grid ) ):
        bound = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(grid ) * len(grid[0] )) - total
def count_negatives_brute_force( grid ):
    return len([number for row in grid for number in row if number < 0] )
def count_negatives_brute_force_with_break( grid ):
    total = 0
    for row in grid:
        for i, number in enumerate(row ):
            if number < 0:
                total += len(row ) - i
                break
    return total
def benchmark():
    from timeit import timeit
    print('Running benchmarks' )
    setup = (
        'from __main__ import count_negatives_binary_search, '
        'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f'''{func}(grid=grid)''' , setup=setup , number=5_0_0 )
        print(f'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
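# Quick illustration (using the small grids defined in `test_grids` above):
#   find_negative_index([4, 3, 2, -1]) -> 3   (index of the first negative)
#   count_negatives_binary_search(test_grids[0]) -> 8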
| 98 |
UNIVERSAL_GAS_CONSTANT = 8.31_44_62  # Unit - J mol-1 K-1
def pressure_of_gas_system( moles , kelvin , volume ) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system( moles , kelvin , pressure ) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
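# Worked example (SI units assumed throughout: mol, K, m^3, Pa):
#   pressure_of_gas_system(2, 100, 5) == 2 * 100 * 8.314462 / 5 == 332.57848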
| 5 | 0 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
def replace_keys( state_dict ):
    model_state_dict = {}
    state_dict.pop('pixel_mean' , None )
    state_dict.pop('pixel_std' , None )
    output_hypernetworks_mlps_pattern = R'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(output_hypernetworks_mlps_pattern , key ):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern , key ).group(2 ) )
            if layer_nb == 0:
                key = key.replace('layers.0' , 'proj_in' )
            elif layer_nb == 1:
                key = key.replace('layers.1' , 'layers.0' )
            elif layer_nb == 2:
                key = key.replace('layers.2' , 'proj_out' )
        model_state_dict[key] = value
    model_state_dict['shared_image_embedding.positional_embedding'] = model_state_dict[
        'prompt_encoder.shared_embedding.positional_embedding'
    ]
    return model_state_dict
def convert_sam_checkpoint( model_name , pytorch_dump_folder , push_to_hub , model_hub_id="ybelkada/segment-anything" ):
    checkpoint_path = hf_hub_download(model_hub_id , f'checkpoints/{model_name}.pth' )
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
        config = SamConfig(
            vision_config=vision_config , )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
        config = SamConfig(
            vision_config=vision_config , )
    state_dict = torch.load(checkpoint_path , map_location='cpu' )
    state_dict = replace_keys(state_dict )
    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor )
    hf_model = SamModel(config )
    hf_model.load_state_dict(state_dict )
    hf_model = hf_model.to('cuda' )
    img_url = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'
    raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert('RGB' )
    input_points = [[[400, 650]]]
    input_labels = [[1]]
    inputs = processor(images=np.array(raw_image ) , return_tensors='pt' ).to('cuda' )
    with torch.no_grad():
        output = hf_model(**inputs )
    scores = output.iou_scores.squeeze()
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.5_79_89_02_51_15_96_68
        inputs = processor(
            images=np.array(raw_image ) , input_points=input_points , input_labels=input_labels , return_tensors='pt' ).to('cuda' )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.97_12_60_30_92_19_36_04
        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image ) , input_boxes=input_boxes , return_tensors='pt' ).to('cuda' )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.86_86_01_56_05_92_65_14
        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image ) , input_points=input_points , input_labels=input_labels , return_tensors='pt' ).to('cuda' )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.99_36_04_77_92_43_46_92
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""]
parser.add_argument(
"""--model_name""",
default="""sam_vit_h_4b8939""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
parser.add_argument(
"""--model_hub_id""",
default="""ybelkada/segment-anything""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
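# Example invocation (the script name and output path are hypothetical; the
# model names come from the `choices` list above):
#   python convert_sam_to_hf.py --model_name sam_vit_b_01ec64 \
#       --pytorch_dump_folder_path ./sam-vit-base-converted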
| 99 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve( matrix , vector ) -> Matrix:
    """simple docstring"""
    size = len(matrix )
    augmented = [[0 for _ in range(size + 1 )] for _ in range(size )]
    row: int
    rowa: int
    col: int
    cola: int
    pivot_row: int
    ratio: float
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[rowa][col] ), rowa) for rowa in range(row , size ) )[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for rowa in range(row + 1 , size ):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1 , size + 1 ):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1 , size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col , size + 1 ):
                augmented[row][cola] -= augmented[col][cola] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
    ]
def interpolate( data_points ) -> Callable[[int], int]:
    """simple docstring"""
    size = len(data_points )
    matrix = [[0 for _ in range(size )] for _ in range(size )]
    vector = [[0] for _ in range(size )]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(data_points ):
        for col in range(size ):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix , vector )
    def interpolated_func(var ) -> int:
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )
    return interpolated_func
def question_function( variable ) -> int:
"""simple docstring"""
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution( func = question_function , order = 10 ) -> int:
    """simple docstring"""
    data_points = [func(x_val ) for x_val in range(1 , order + 1 )]
    polynomials = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    ret = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
    return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
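# Worked illustration (Project Euler 101 style): for u(n) = n**3, the optimum
# polynomial through the first k points first mispredicts at n = k + 1, e.g.
# interpolate([1])(2) == 1 and interpolate([1, 8])(3) == 15. solution() sums
# these "first incorrect terms" for the degree-10 generating function above.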
| 5 | 0 |
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")
class GraphAdjacencyList(Generic[T] ):
    """simple docstring"""

    def __init__( self , directed = True):
        self.adj_list = {}  # dictionary of lists
        self.directed = directed
    def add_edge( self , source_vertex , destination_vertex):
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self
def __repr__( self):
return pformat(self.adj_list)
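# A minimal, hedged usage sketch (the vertices below are illustrative values):
if __name__ == "__main__":
    graph = GraphAdjacencyList(directed=False)
    graph.add_edge(1, 2).add_edge(2, 3)
    print(graph)  # {1: [2], 2: [1, 3], 3: [2]}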
| 100 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xlm'''] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xlm'''] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 | 0 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort( a , start , end ):
    '''simple docstring'''
    count = 0
    if start < end:
        pivot = randint(start , end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a , start , end )
        count += _in_place_quick_sort(a , start , p - 1 )
        count += _in_place_quick_sort(a , p + 1 , end )
    return count
def _in_place_partition( a , start , end ):
    '''simple docstring'''
    count = 0
    pivot = randint(start , end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start , end ):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 1000 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
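# Minimal sanity sketch (assumed semantics: sorts `a[start:end + 1]` in place
# and returns the number of comparisons, which varies with the random pivots):
#   data = [9, 4, 7, 1]
#   _in_place_quick_sort(data, 0, len(data) - 1)
#   # data is now [1, 4, 7, 9]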
| 101 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_efficientnet'''] = ['''EfficientNetImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_efficientnet'''] = [
'''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientNetForImageClassification''',
'''EfficientNetModel''',
'''EfficientNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 5 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("""transformers.models.speecht5""")
MAPPING_SPEECH_ENCODER_PRENET = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
MAPPING_TEXT_ENCODER_PRENET = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
MAPPING_SPEECH_DECODER_PRENET = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
MAPPING_TEXT_DECODER_PRENET = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
MAPPING_TEXT_DECODER_POSTNET = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
MAPPING_ENCODER = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
MAPPING_DECODER = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def lowercase ( _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : List[str] , _snake_case : Optional[int] , _snake_case : Optional[Any] ) ->List[Any]:
"""simple docstring"""
for attribute in key.split('''.''' ):
__snake_case : Union[str, Any] = getattr(_snake_case , _snake_case )
if weight_type is not None:
__snake_case : str = getattr(_snake_case , _snake_case ).shape
else:
__snake_case : str = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__snake_case : List[Any] = value
elif weight_type == "weight_g":
__snake_case : Dict = value
elif weight_type == "weight_v":
__snake_case : List[Any] = value
elif weight_type == "bias":
__snake_case : Dict = value
elif weight_type == "running_mean":
__snake_case : Tuple = value
elif weight_type == "running_var":
__snake_case : Dict = value
elif weight_type == "num_batches_tracked":
__snake_case : Any = value
else:
__snake_case : Dict = value
logger.info(f"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def lowercase ( _snake_case : List[str] , _snake_case : int ) ->List[str]:
"""simple docstring"""
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
__snake_case , __snake_case : Union[str, Any] = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def lowercase ( _snake_case : Optional[int] , _snake_case : int , _snake_case : Optional[int] ) ->Tuple:
"""simple docstring"""
__snake_case : Tuple = []
if task == "s2t":
__snake_case : List[Any] = hf_model.speechta.encoder.prenet.feature_encoder
__snake_case : Optional[Any] = MAPPING_S2T
__snake_case : int = IGNORE_KEYS_S2T
elif task == "t2s":
__snake_case : Union[str, Any] = None
__snake_case : Tuple = MAPPING_T2S
__snake_case : Union[str, Any] = IGNORE_KEYS_T2S
elif task == "s2s":
__snake_case : Union[str, Any] = hf_model.speechta.encoder.prenet.feature_encoder
__snake_case : Any = MAPPING_S2S
__snake_case : List[str] = IGNORE_KEYS_S2S
else:
raise ValueError(f"""Unsupported task: {task}""" )
for name, value in fairseq_dict.items():
if should_ignore(_snake_case , _snake_case ):
logger.info(f"""{name} was ignored""" )
continue
__snake_case : Any = False
if "conv_layers" in name:
load_conv_layer(
_snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , )
__snake_case : Dict = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
__snake_case , __snake_case : List[str] = key.split('''.*.''' )
if prefix in name and suffix in name:
__snake_case : int = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
__snake_case : Optional[int] = True
if "*" in mapped_key:
__snake_case : Union[str, Any] = name.split(_snake_case )[0].split('''.''' )[-2]
__snake_case : Optional[Any] = mapped_key.replace('''*''' , _snake_case )
if "weight_g" in name:
__snake_case : str = '''weight_g'''
elif "weight_v" in name:
__snake_case : List[Any] = '''weight_v'''
elif "bias" in name:
__snake_case : List[str] = '''bias'''
elif "weight" in name:
__snake_case : Optional[Any] = '''weight'''
elif "running_mean" in name:
__snake_case : Optional[Any] = '''running_mean'''
elif "running_var" in name:
__snake_case : Optional[int] = '''running_var'''
elif "num_batches_tracked" in name:
__snake_case : Union[str, Any] = '''num_batches_tracked'''
else:
__snake_case : str = None
set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
continue
if not is_used:
unused_weights.append(_snake_case )
logger.warning(f"""Unused weights: {unused_weights}""" )
def lowercase ( _snake_case : Any , _snake_case : str , _snake_case : Optional[Any] , _snake_case : Any , _snake_case : int ) ->List[str]:
"""simple docstring"""
__snake_case : Dict = full_name.split('''conv_layers.''' )[-1]
__snake_case : str = name.split('''.''' )
__snake_case : Union[str, Any] = int(items[0] )
__snake_case : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__snake_case : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__snake_case : str = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__snake_case : Union[str, Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__snake_case : Any = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_snake_case )
@torch.no_grad()
def lowercase ( _snake_case : Tuple , _snake_case : str , _snake_case : int , _snake_case : Optional[int]=None , _snake_case : Optional[Any]=None , _snake_case : Optional[Any]=None , ) ->int:
"""simple docstring"""
if config_path is not None:
__snake_case : str = SpeechTaConfig.from_pretrained(_snake_case )
else:
__snake_case : Union[str, Any] = SpeechTaConfig()
if task == "s2t":
__snake_case : int = config.max_text_positions
__snake_case : List[Any] = SpeechTaForSpeechToText(_snake_case )
elif task == "t2s":
__snake_case : int = 1_876
__snake_case : List[str] = 600
__snake_case : Union[str, Any] = config.max_speech_positions
__snake_case : Union[str, Any] = SpeechTaForTextToSpeech(_snake_case )
elif task == "s2s":
__snake_case : Union[str, Any] = 1_876
__snake_case : Tuple = config.max_speech_positions
__snake_case : List[str] = SpeechTaForSpeechToSpeech(_snake_case )
else:
raise ValueError(f"""Unknown task name: {task}""" )
if vocab_path:
__snake_case : str = SpeechTaTokenizer(_snake_case , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
__snake_case : Optional[Any] = AddedToken('''<mask>''' , lstrip=_snake_case , rstrip=_snake_case )
__snake_case : str = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
__snake_case : str = SpeechTaFeatureExtractor()
__snake_case : int = SpeechTaProcessor(tokenizer=_snake_case , feature_extractor=_snake_case )
processor.save_pretrained(_snake_case )
__snake_case : Optional[int] = torch.load(_snake_case )
recursively_load_weights(fairseq_checkpoint['''model'''] , _snake_case , _snake_case )
model.save_pretrained(_snake_case )
if repo_id:
print('''Pushing to the hub...''' )
processor.push_to_hub(_snake_case )
model.push_to_hub(_snake_case )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
help="""Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
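# Example invocation (the script and file names below are hypothetical; the
# flags come from the argument parser above):
#   python convert_speecht5_original_pytorch_checkpoint_to_pytorch.py --task t2s \
#       --checkpoint_path ./speecht5_tts.pt --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5-tts-converted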
| 102 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_timesformer'''] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
A__ : str = logging.get_logger(__name__)
A__ : Any = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A__ : str = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
A__ : Union[str, Any] = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
A__ : Dict = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class __snake_case ( UpperCamelCase_ ):
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_INIT_CONFIGURATION
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = RealmTokenizer
def __init__( self : int , A_ : Optional[int]=None , A_ : Optional[Any]=None , A_ : Optional[Any]=True , A_ : Optional[int]="[UNK]" , A_ : List[Any]="[SEP]" , A_ : List[Any]="[PAD]" , A_ : Optional[Any]="[CLS]" , A_ : Dict="[MASK]" , A_ : List[Any]=True , A_ : List[str]=None , **A_ : List[str] , ):
super().__init__(
A_ , tokenizer_file=A_ , do_lower_case=A_ , unk_token=A_ , sep_token=A_ , pad_token=A_ , cls_token=A_ , mask_token=A_ , tokenize_chinese_chars=A_ , strip_accents=A_ , **A_ , )
lowerCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get('''lowercase''' , A_) != do_lower_case
or normalizer_state.get('''strip_accents''' , A_) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , A_) != tokenize_chinese_chars
):
lowerCAmelCase_ : int = getattr(A_ , normalizer_state.pop('''type'''))
lowerCAmelCase_ : str = do_lower_case
lowerCAmelCase_ : Dict = strip_accents
lowerCAmelCase_ : Optional[Any] = tokenize_chinese_chars
lowerCAmelCase_ : Union[str, Any] = normalizer_class(**A_)
lowerCAmelCase_ : Any = do_lower_case
def UpperCAmelCase__ ( self : Optional[Any] , A_ : Optional[Any] , **A_ : Tuple):
lowerCAmelCase_ : List[str] = PaddingStrategy.MAX_LENGTH
lowerCAmelCase_ : str = text
lowerCAmelCase_ : int = kwargs.pop('''text_pair''' , A_)
lowerCAmelCase_ : str = kwargs.pop('''return_tensors''' , A_)
lowerCAmelCase_ : int = {
'''input_ids''': [],
'''attention_mask''': [],
'''token_type_ids''': [],
}
for idx, candidate_text in enumerate(A_):
if batch_text_pair is not None:
lowerCAmelCase_ : List[Any] = batch_text_pair[idx]
else:
lowerCAmelCase_ : List[Any] = None
lowerCAmelCase_ : int = super().__call__(A_ , A_ , return_tensors=A_ , **A_)
lowerCAmelCase_ : Optional[Any] = encoded_candidates.get('''input_ids''')
lowerCAmelCase_ : List[str] = encoded_candidates.get('''attention_mask''')
lowerCAmelCase_ : Optional[Any] = encoded_candidates.get('''token_type_ids''')
if encoded_input_ids is not None:
output_data["input_ids"].append(A_)
if encoded_attention_mask is not None:
output_data["attention_mask"].append(A_)
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(A_)
lowerCAmelCase_ : List[str] = {key: item for key, item in output_data.items() if len(A_) != 0}
return BatchEncoding(A_ , tensor_type=A_)
def UpperCAmelCase__ ( self : List[str] , token_ids_a : Tuple , token_ids_b : List[Any]=None):
lowerCAmelCase_ : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_b:
lowerCAmelCase_ += token_ids_b + [self.sep_token_id]
return lowerCAmelCase_
def UpperCAmelCase__ ( self : Tuple , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None):
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_b is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
def UpperCAmelCase__ ( self : List[str] , save_directory : str , filename_prefix : Optional[str] = None):
files = self._tokenizer.model.save(save_directory , name=filename_prefix)
return tuple(files)
| 103 |
def actual_power ( a , b ) -> int:
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
else:
return a * actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
def power ( a , b ) -> float:
"""simple docstring"""
if b < 0:
return 1 / actual_power(a , b )
return actual_power(a , b )
if __name__ == "__main__":
print(power(-2, -3))
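# Quick sanity checks for the divide-and-conquer exponentiation above, which
# needs only O(log b) recursion depth and handles negative exponents as
# 1 / a**|b|:
assert power(2, 3) == 8
assert power(2, -3) == 0.125
assert power(-2, -3) == -0.125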
| 5 | 0 |
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''T''')
def _A ( A__ ):
"""simple docstring"""
return (position - 1) // 2
def _A ( A__ ):
"""simple docstring"""
return (2 * position) + 1
def _A ( A__ ):
"""simple docstring"""
return (2 * position) + 2
class lowercase_ (Generic[T] ):
"""simple docstring"""
def __init__( self : Union[str, Any] ):
__lowercase = []
__lowercase = {}
__lowercase = 0
def __len__( self : Optional[Any] ):
return self.elements
def __repr__( self : Any ):
return str(self.heap )
def SCREAMING_SNAKE_CASE ( self : str ):
# Check if the priority queue is empty
return self.elements == 0
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : T ,lowercase__ : int ):
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
__lowercase = self.elements
self.elements += 1
self._bubble_up(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 ,self.elements - 1 )
__lowercase , __lowercase = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
__lowercase , __lowercase = self.heap[0]
self._bubble_down(lowercase__ )
return elem
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : T ,lowercase__ : int ):
# Update the weight of the given key
__lowercase = self.position_map[elem]
__lowercase = (elem, weight)
if position > 0:
__lowercase = get_parent_position(lowercase__ )
__lowercase , __lowercase = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(lowercase__ )
else:
self._bubble_down(lowercase__ )
else:
self._bubble_down(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : T ):
# Place a node at the proper position (upward movement) [to be used internally
# only]
__lowercase = self.position_map[elem]
if curr_pos == 0:
return None
__lowercase = get_parent_position(lowercase__ )
__lowercase , __lowercase = self.heap[curr_pos]
__lowercase , __lowercase = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(lowercase__ ,lowercase__ )
return self._bubble_up(lowercase__ )
return None
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : T ):
# Place a node at the proper position (downward movement) [to be used
# internally only]
__lowercase = self.position_map[elem]
__lowercase , __lowercase = self.heap[curr_pos]
__lowercase = get_child_left_position(lowercase__ )
__lowercase = get_child_right_position(lowercase__ )
if child_left_position < self.elements and child_right_position < self.elements:
__lowercase , __lowercase = self.heap[child_left_position]
__lowercase , __lowercase = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(lowercase__ ,lowercase__ )
return self._bubble_down(lowercase__ )
if child_left_position < self.elements:
__lowercase , __lowercase = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(lowercase__ ,lowercase__ )
return self._bubble_down(lowercase__ )
else:
return None
if child_right_position < self.elements:
__lowercase , __lowercase = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(lowercase__ ,lowercase__ )
return self._bubble_down(lowercase__ )
return None
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : int ,lowercase__ : int ):
# Swap the nodes at the given positions
__lowercase = self.heap[nodea_pos][0]
__lowercase = self.heap[nodea_pos][0]
__lowercase , __lowercase = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
__lowercase = nodea_pos
__lowercase = nodea_pos
class lowercase_ (Generic[T] ):
"""simple docstring"""
def __init__( self : Any ):
__lowercase = {}
__lowercase = 0
def __repr__( self : Any ):
return str(self.connections )
def __len__( self : Any ):
return self.nodes
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : T ):
# Add a node in the graph if it is not in the graph
if node not in self.connections:
__lowercase = {}
self.nodes += 1
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : T ,lowercase__ : T ,lowercase__ : int ):
# Add an edge between 2 nodes in the graph
self.add_node(lowercase__ )
self.add_node(lowercase__ )
__lowercase = weight
__lowercase = weight
def _A ( A__ , ):
"""simple docstring"""
__lowercase = {node: maxsize for node in graph.connections}
__lowercase = {node: None for node in graph.connections}
__lowercase = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(A__ , A__ )
if priority_queue.is_empty():
return dist, parent
# initialization
__lowercase = priority_queue.extract_min()
__lowercase = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__lowercase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(A__ , dist[neighbour] )
__lowercase = node
# running prim's algorithm
while not priority_queue.is_empty():
__lowercase = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__lowercase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(A__ , dist[neighbour] )
__lowercase = node
return dist, parent
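# Usage sketch, assuming the upstream names for the pieces above
# (MinPriorityQueue / GraphUndirectedWeighted for the two classes and
# prims_algo for the function):
#   graph = GraphUndirectedWeighted()
#   graph.add_edge("a", "b", 3)
#   graph.add_edge("b", "c", 10)
#   graph.add_edge("a", "c", 15)
#   dist, parent = prims_algo(graph)  # `parent` encodes the minimum spanning tree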
| 104 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowerCamelCase__ ( nn.Module):
def __init__(self , UpperCAmelCase = 1_6 , UpperCAmelCase = 8_8 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = 3_2 , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = "geglu" , UpperCAmelCase = None , ) -> Any:
super().__init__()
_lowercase =nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=UpperCAmelCase , attention_head_dim=UpperCAmelCase , in_channels=UpperCAmelCase , num_layers=UpperCAmelCase , dropout=UpperCAmelCase , norm_num_groups=UpperCAmelCase , cross_attention_dim=UpperCAmelCase , attention_bias=UpperCAmelCase , sample_size=UpperCAmelCase , num_vector_embeds=UpperCAmelCase , activation_fn=UpperCAmelCase , num_embeds_ada_norm=UpperCAmelCase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_lowercase =0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_lowercase =[7_7, 2_5_7]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_lowercase =[1, 0]
def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase = True , ) -> str:
_lowercase =hidden_states
_lowercase =[]
_lowercase =0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_lowercase =encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_lowercase =self.transformer_index_for_condition[i]
_lowercase =self.transformers[transformer_index](
UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , timestep=UpperCAmelCase , cross_attention_kwargs=UpperCAmelCase , return_dict=UpperCAmelCase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_lowercase =encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_lowercase =output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=UpperCAmelCase )
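# Schematically, the blending above is (with transformer_index_for_condition
# deciding which branch consumes which slice of the condition tokens):
#   delta_i = transformer(x, cond_i) - x
#   out     = x + mix_ratio * delta_0 + (1 - mix_ratio) * delta_1
# so the default mix_ratio of 0.5 weights both branches equally.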
| 5 | 0 |
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class __UpperCamelCase ( a__ ):
lowerCamelCase : List[str] =ComputeEnvironment.AMAZON_SAGEMAKER
lowerCamelCase : str =True
lowerCamelCase : Union[str, Any] ="""ml.p3.2xlarge"""
lowerCamelCase : str ="""accelerate_sagemaker_execution_role"""
lowerCamelCase : int ="""hf-sm"""
lowerCamelCase : int ="""us-east-1"""
lowerCamelCase : Tuple =1
lowerCamelCase : Any ="""accelerate-sagemaker-1"""
lowerCamelCase : str ="""1.6"""
lowerCamelCase : Tuple ="""4.4"""
lowerCamelCase : Optional[int] ="""train.py"""
lowerCamelCase : Optional[Any] =[
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""False""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
lowerCamelCase : Union[str, Any] =[
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""--do_test""",
"""False""",
"""--do_predict""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self ) -> List[str]:
# If no defaults are changed, `to_kwargs` returns an empty dict.
a : str = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args["model_name_or_path"] , lowerCAmelCase__ )
assert isinstance(converted_args["do_train"] , lowerCAmelCase__ )
assert isinstance(converted_args["epochs"] , lowerCAmelCase__ )
assert isinstance(converted_args["learning_rate"] , lowerCAmelCase__ )
assert isinstance(converted_args["max_steps"] , lowerCAmelCase__ )
with pytest.raises(lowerCAmelCase__ ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
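# Behavior sketch for _convert_nargs_to_dict on the "success" args above: the
# flat ["--key", "value", ...] list becomes a dict with string values coerced
# to bool/int/float where possible, roughly
#   {"model_name_or_path": "bert", "do_train": False, "epochs": 3,
#    "learning_rate": 5e-5, "max_steps": 50.5}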
| 105 |
import heapq as hq
import math
from collections.abc import Iterator
class lowerCamelCase__ :
def __init__(self , UpperCAmelCase ) -> Any:
_lowercase =str(id_ )
_lowercase =None
_lowercase =None
_lowercase =[]
_lowercase ={} # {vertex:distance}
def __lt__(self , UpperCAmelCase ) -> List[str]:
return self.key < other.key
def __repr__(self ) -> str:
return self.id
def __A (self , UpperCAmelCase ) -> Dict:
self.neighbors.append(UpperCAmelCase )
def __A (self , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
_lowercase =weight
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case ) -> List[str]:
"""simple docstring"""
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , __snake_case )
graph[b - 1].add_edge(graph[a - 1] , __snake_case )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> list:
"""simple docstring"""
_lowercase =[]
for u in graph:
_lowercase =math.inf
_lowercase =None
_lowercase =0
_lowercase =graph[:]
while q:
_lowercase =min(__snake_case )
q.remove(__snake_case )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
_lowercase =u
_lowercase =u.edges[v.id]
for i in range(1 , len(__snake_case ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> Iterator[tuple]:
"""simple docstring"""
for u in graph:
_lowercase =math.inf
_lowercase =None
_lowercase =0
_lowercase =list(__snake_case )
hq.heapify(__snake_case )
while h:
_lowercase =hq.heappop(__snake_case )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
_lowercase =u
_lowercase =u.edges[v.id]
hq.heapify(__snake_case )
for i in range(1 , len(__snake_case ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def UpperCAmelCase_ ( ) -> None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
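# Complexity note on the two traversals above (upstream names prim and
# prim_heap): the list-based version re-scans `q` for the minimum vertex each
# round, giving O(V^2 + E) overall; the heap-based version keeps vertices in a
# binary heap, but because decrease-key is emulated by re-running hq.heapify
# inside the neighbor loop it lands closer to O(E * V) than the textbook
# O(E log V).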
| 5 | 0 |
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Union[str, Any] ,lowercase_ : List[str] ,lowercase_ : str=1_3 ,lowercase_ : Optional[int]=7 ,lowercase_ : int=True ,lowercase_ : Dict=True ,lowercase_ : List[Any]=9_9 ,lowercase_ : Union[str, Any]=3_2 ,lowercase_ : Optional[Any]=5 ,lowercase_ : Union[str, Any]=4 ,lowercase_ : Any=3_7 ,lowercase_ : List[str]="gelu" ,lowercase_ : List[str]=0.1 ,lowercase_ : Union[str, Any]=0.1 ,lowercase_ : List[Any]=5_0 ,lowercase_ : int=0.02 ,lowercase_ : str=True ,lowercase_ : Dict=None ,):
lowerCAmelCase__ : str = parent
lowerCAmelCase__ : Optional[int] = batch_size
lowerCAmelCase__ : Optional[Any] = seq_length
lowerCAmelCase__ : Any = is_training
lowerCAmelCase__ : Dict = use_input_mask
lowerCAmelCase__ : Tuple = vocab_size
lowerCAmelCase__ : int = hidden_size
lowerCAmelCase__ : str = num_hidden_layers
lowerCAmelCase__ : str = num_attention_heads
lowerCAmelCase__ : Optional[Any] = intermediate_size
lowerCAmelCase__ : int = hidden_act
lowerCAmelCase__ : str = hidden_dropout_prob
lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase__ : int = max_position_embeddings
lowerCAmelCase__ : int = initializer_range
lowerCAmelCase__ : List[Any] = use_labels
lowerCAmelCase__ : List[str] = scope
def __lowerCAmelCase ( self : Union[str, Any] ):
lowerCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowerCAmelCase__ : Tuple = None
if self.use_input_mask:
lowerCAmelCase__ : int = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
lowerCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowerCAmelCase__ : List[str] = self.get_config()
return config, input_ids, input_mask, token_labels
def __lowerCAmelCase ( self : Tuple ):
return BertGenerationConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,is_decoder=lowercase_ ,initializer_range=self.initializer_range ,)
def __lowerCAmelCase ( self : List[Any] ):
config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __lowerCAmelCase ( self : Union[str, Any] ,lowercase_ : Optional[Any] ,lowercase_ : List[Any] ,lowercase_ : str ,lowercase_ : Union[str, Any] ,**lowercase_ : Optional[int] ,):
lowerCAmelCase__ : int = BertGenerationEncoder(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase__ : Optional[Any] = model(lowercase_ ,attention_mask=lowercase_ )
lowerCAmelCase__ : int = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : Union[str, Any] ,lowercase_ : Dict ,lowercase_ : List[Any] ,lowercase_ : Optional[int] ,lowercase_ : Any ,lowercase_ : Dict ,lowercase_ : Union[str, Any] ,**lowercase_ : Dict ,):
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Dict = BertGenerationEncoder(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase__ : int = model(
lowercase_ ,attention_mask=lowercase_ ,encoder_hidden_states=lowercase_ ,encoder_attention_mask=lowercase_ ,)
lowerCAmelCase__ : int = model(
lowercase_ ,attention_mask=lowercase_ ,encoder_hidden_states=lowercase_ ,)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : List[str] ,lowercase_ : Dict ,lowercase_ : List[str] ,lowercase_ : Any ,lowercase_ : Dict ,lowercase_ : Dict ,lowercase_ : List[Any] ,**lowercase_ : str ,):
lowerCAmelCase__ : Optional[Any] = True
lowerCAmelCase__ : int = True
lowerCAmelCase__ : Optional[int] = BertGenerationDecoder(config=lowercase_ ).to(lowercase_ ).eval()
# first forward pass
lowerCAmelCase__ : Union[str, Any] = model(
lowercase_ ,attention_mask=lowercase_ ,encoder_hidden_states=lowercase_ ,encoder_attention_mask=lowercase_ ,use_cache=lowercase_ ,)
lowerCAmelCase__ : Dict = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase__ : Any = ids_tensor((self.batch_size, 3) ,config.vocab_size )
lowerCAmelCase__ : Optional[Any] = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
lowerCAmelCase__ : Optional[Any] = torch.cat([input_ids, next_tokens] ,dim=-1 )
lowerCAmelCase__ : Dict = torch.cat([input_mask, next_mask] ,dim=-1 )
lowerCAmelCase__ : Union[str, Any] = model(
lowercase_ ,attention_mask=lowercase_ ,encoder_hidden_states=lowercase_ ,encoder_attention_mask=lowercase_ ,output_hidden_states=lowercase_ ,)['''hidden_states'''][0]
lowerCAmelCase__ : str = model(
lowercase_ ,attention_mask=lowercase_ ,encoder_hidden_states=lowercase_ ,encoder_attention_mask=lowercase_ ,past_key_values=lowercase_ ,output_hidden_states=lowercase_ ,)['''hidden_states'''][0]
# select random slice
lowerCAmelCase__ : List[str] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
lowerCAmelCase__ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ ,lowercase_ ,atol=1E-3 ) )
def __lowerCAmelCase ( self : Any ,lowercase_ : List[Any] ,lowercase_ : List[str] ,lowercase_ : Tuple ,lowercase_ : Any ,*lowercase_ : List[str] ,):
lowerCAmelCase__ : Optional[Any] = BertGenerationDecoder(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase__ : int = model(lowercase_ ,attention_mask=lowercase_ ,labels=lowercase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : Optional[Any] ):
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Tuple = self.prepare_config_and_inputs()
lowerCAmelCase__ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( a_ , a_ , a_ , unittest.TestCase ):
"""simple docstring"""
lowercase__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
lowercase__ = (BertGenerationDecoder,) if is_torch_available() else ()
lowercase__ = (
{"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
if is_torch_available()
else {}
)
def __lowerCAmelCase ( self : str ):
lowerCAmelCase__ : Tuple = BertGenerationEncoderTester(self )
lowerCAmelCase__ : List[str] = ConfigTester(self ,config_class=lowercase_ ,hidden_size=3_7 )
def __lowerCAmelCase ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Dict ):
lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def __lowerCAmelCase ( self : Dict ):
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : Optional[Any] = '''bert'''
self.model_tester.create_and_check_model(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
def __lowerCAmelCase ( self : Dict ):
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowercase_ )
def __lowerCAmelCase ( self : str ):
lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowercase_ )
def __lowerCAmelCase ( self : str ):
# This regression test was failing with PyTorch < 1.3
config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask = self.model_tester.prepare_config_and_inputs_for_decoder()
input_mask = None
self.model_tester.create_and_check_model_as_decoder(
config ,input_ids ,input_mask ,token_labels ,encoder_hidden_states ,encoder_attention_mask ,)
def __lowerCAmelCase ( self : int ):
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowercase_ )
@slow
def __lowerCAmelCase ( self : Union[str, Any] ):
lowerCAmelCase__ : str = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
self.assertIsNotNone(lowercase_ )
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowerCAmelCase ( self : List[Any] ):
lowerCAmelCase__ : List[str] = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
lowerCAmelCase__ : List[Any] = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(lowercase_ )[0]
lowerCAmelCase__ : Any = torch.Size([1, 8, 1_0_2_4] )
self.assertEqual(output.shape ,lowercase_ )
lowerCAmelCase__ : int = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,lowercase_ ,atol=1E-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowerCAmelCase ( self : Dict ):
lowerCAmelCase__ : Optional[Any] = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
lowerCAmelCase__ : Optional[int] = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
with torch.no_grad():
lowerCAmelCase__ : Optional[Any] = model(lowercase_ )[0]
lowerCAmelCase__ : Union[str, Any] = torch.Size([1, 8, 5_0_3_5_8] )
self.assertEqual(output.shape ,lowercase_ )
lowerCAmelCase__ : Dict = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,lowercase_ ,atol=1E-4 ) )
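# What the past-key-values test above verifies, in short: decoding
# [input_ids ++ next_tokens] in one forward pass must match decoding the same
# tokens incrementally with cached key/value states, up to ~1e-3 absolute
# tolerance on a randomly chosen slice of the final hidden states.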
| 106 |
# flake8: noqa
# Lint as: python3
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 5 | 0 |
def __magic_name__ ( density : float, bulk_modulus : float ):
'''simple docstring'''
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
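# Worked example for the function above (upstream name:
# speed_of_sound_in_a_fluid). Newton-Laplace gives c = (K / rho) ** 0.5; for
# water at ~20 C, K ~= 2.15e9 Pa and rho ~= 998 kg/m^3, so this prints
# roughly 1467.8 m/s:
print(__magic_name__(density=998, bulk_modulus=2.15e9))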
| 107 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowerCamelCase__ ( lowerCAmelCase):
SCREAMING_SNAKE_CASE__ = '''wavlm'''
def __init__(self , UpperCAmelCase=3_2 , UpperCAmelCase=7_6_8 , UpperCAmelCase=1_2 , UpperCAmelCase=1_2 , UpperCAmelCase=3_0_7_2 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase="group" , UpperCAmelCase="gelu" , UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase=(1_0, 3, 3, 3, 3, 2, 2) , UpperCAmelCase=False , UpperCAmelCase=1_2_8 , UpperCAmelCase=1_6 , UpperCAmelCase=3_2_0 , UpperCAmelCase=8_0_0 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.05 , UpperCAmelCase=1_0 , UpperCAmelCase=2 , UpperCAmelCase=0.0 , UpperCAmelCase=1_0 , UpperCAmelCase=3_2_0 , UpperCAmelCase=2 , UpperCAmelCase=0.1 , UpperCAmelCase=1_0_0 , UpperCAmelCase=2_5_6 , UpperCAmelCase=2_5_6 , UpperCAmelCase=0.1 , UpperCAmelCase="mean" , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=2_5_6 , UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCAmelCase=(5, 3, 3, 1, 1) , UpperCAmelCase=(1, 2, 3, 1, 1) , UpperCAmelCase=5_1_2 , UpperCAmelCase=8_0 , UpperCAmelCase=0 , UpperCAmelCase=1 , UpperCAmelCase=2 , UpperCAmelCase=False , UpperCAmelCase=3 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=None , **UpperCAmelCase , ) -> Optional[Any]:
super().__init__(**UpperCAmelCase , pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase )
_lowercase =hidden_size
_lowercase =feat_extract_norm
_lowercase =feat_extract_activation
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =conv_bias
_lowercase =num_buckets
_lowercase =max_bucket_distance
_lowercase =num_conv_pos_embeddings
_lowercase =num_conv_pos_embedding_groups
_lowercase =len(self.conv_dim )
_lowercase =num_hidden_layers
_lowercase =intermediate_size
_lowercase =hidden_act
_lowercase =num_attention_heads
_lowercase =hidden_dropout
_lowercase =attention_dropout
_lowercase =activation_dropout
_lowercase =feat_proj_dropout
_lowercase =final_dropout
_lowercase =layerdrop
_lowercase =layer_norm_eps
_lowercase =initializer_range
_lowercase =num_ctc_classes
_lowercase =vocab_size
_lowercase =do_stable_layer_norm
_lowercase =use_weighted_layer_sum
_lowercase =classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowercase =apply_spec_augment
_lowercase =mask_time_prob
_lowercase =mask_time_length
_lowercase =mask_time_min_masks
_lowercase =mask_feature_prob
_lowercase =mask_feature_length
# parameters for pretraining with codevector quantized representations
_lowercase =num_codevectors_per_group
_lowercase =num_codevector_groups
_lowercase =contrastive_logits_temperature
_lowercase =num_negatives
_lowercase =codevector_dim
_lowercase =proj_codevector_dim
_lowercase =diversity_loss_weight
# ctc loss
_lowercase =ctc_loss_reduction
_lowercase =ctc_zero_infinity
# adapter
_lowercase =add_adapter
_lowercase =adapter_kernel_size
_lowercase =adapter_stride
_lowercase =num_adapter_layers
_lowercase =output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowercase =classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =xvector_output_dim
@property
def __A (self ) -> int:
return functools.reduce(operator.mul , self.conv_stride , 1 )
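# The property above (upstream name inputs_to_logits_ratio) is the
# waveform-to-frame downsampling factor; with the default conv_stride
# (5, 2, 2, 2, 2, 2, 2) it is 5 * 2**6 = 320, i.e. one encoder frame per
# 320 input samples (20 ms at 16 kHz).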
| 5 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def a__ ( voltage : float , current : float , power : float ):
'''simple docstring'''
result = namedtuple("result" , "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
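# Worked examples for the one-unknown solver above (upstream name:
# electric_power); exactly one of voltage/current/power must be 0:
print(a__(voltage=0, current=2, power=5))  # result(name='voltage', value=2.5)
print(a__(voltage=2.2, current=2.2, power=0))  # result(name='power', value=4.84)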
| 108 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase):
def __A (self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __A (self ) -> Optional[Any]:
_lowercase =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
_lowercase =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
_lowercase ='''xvjiarui/stable-diffusion-2-inpainting'''
_lowercase , _lowercase =FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCAmelCase , safety_checker=UpperCAmelCase )
_lowercase ='''Face of a yellow cat, high resolution, sitting on a park bench'''
_lowercase =jax.random.PRNGKey(0 )
_lowercase =5_0
_lowercase =jax.device_count()
_lowercase =num_samples * [prompt]
_lowercase =num_samples * [init_image]
_lowercase =num_samples * [mask_image]
_lowercase , _lowercase , _lowercase =pipeline.prepare_inputs(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# shard inputs and rng
_lowercase =replicate(UpperCAmelCase )
_lowercase =jax.random.split(UpperCAmelCase , jax.device_count() )
_lowercase =shard(UpperCAmelCase )
_lowercase =shard(UpperCAmelCase )
_lowercase =shard(UpperCAmelCase )
_lowercase =pipeline(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase )
_lowercase =output.images.reshape(UpperCAmelCase , 5_1_2 , 5_1_2 , 3 )
_lowercase =images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
_lowercase =jnp.asarray(jax.device_get(image_slice.flatten() ) )
_lowercase =jnp.array(
[0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
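# Data-parallel layout used above, in short: with N local devices,
# `replicate` copies the pipeline params to every device, `shard` reshapes
# each host array from (N * k, ...) to (N, k, ...), and the jit-compiled call
# runs one slice per device; here num_samples == jax.device_count(), so each
# device denoises exactly one (prompt, image, mask) triple.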
| 5 | 0 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
A: Dict = logging.getLogger()
def _snake_case ( UpperCamelCase : Path , UpperCamelCase : list ):
UpperCAmelCase : str = """\n""".join(UpperCamelCase )
Path(UpperCamelCase ).open("""w""" ).writelines(UpperCamelCase )
A: int = "patrickvonplaten/t5-tiny-random"
A: Dict = "sshleifer/bart-tiny-random"
A: Tuple = "sshleifer/tiny-mbart"
A: Tuple = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
def SCREAMING_SNAKE_CASE ( self , model ) -> int:
'''simple docstring'''
UpperCAmelCase : Dict = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
UpperCAmelCase : Dict = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
UpperCAmelCase : Dict = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""]
_dump_articles(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" )
UpperCAmelCase : str = """translation_en_to_de""" if model == T5_TINY else """summarization"""
UpperCAmelCase : Dict = F"\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n ".split()
with patch.object(_SCREAMING_SNAKE_CASE , """argv""" , _SCREAMING_SNAKE_CASE ):
run_generate()
assert Path(_SCREAMING_SNAKE_CASE ).exists()
# os.remove(Path(output_file_name))
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
self.run_eval_tester(T5_TINY )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
self.run_eval_tester(_SCREAMING_SNAKE_CASE )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def SCREAMING_SNAKE_CASE ( self , model ) -> Any:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
UpperCAmelCase : int = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
UpperCAmelCase : int = {
"""en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
"""de""": [
"""Maschinelles Lernen ist großartig, oder?""",
"""Ich esse gerne Bananen""",
"""Morgen ist wieder ein toller Tag!""",
],
}
UpperCAmelCase : Tuple = Path(self.get_auto_remove_tmp_dir() )
UpperCAmelCase : Optional[int] = str(tmp_dir / """scores.json""" )
UpperCAmelCase : Dict = str(tmp_dir / """val.target""" )
_dump_articles(_SCREAMING_SNAKE_CASE , text["""en"""] )
_dump_articles(_SCREAMING_SNAKE_CASE , text["""de"""] )
UpperCAmelCase : Tuple = """translation_en_to_de""" if model == T5_TINY else """summarization"""
UpperCAmelCase : Union[str, Any] = F"\n run_eval_search.py\n {model}\n {str(_SCREAMING_SNAKE_CASE )}\n {str(_SCREAMING_SNAKE_CASE )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n ".split()
testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] )
with patch.object(_SCREAMING_SNAKE_CASE , """argv""" , _SCREAMING_SNAKE_CASE ):
with CaptureStdout() as cs:
run_search()
UpperCAmelCase : str = [""" num_beams | length_penalty""", model, """Best score args"""]
UpperCAmelCase : Union[str, Any] = ["""Info"""]
if "translation" in task:
expected_strings.append("""bleu""" )
else:
expected_strings.extend(ROUGE_KEYS )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(_SCREAMING_SNAKE_CASE ).exists()
os.remove(Path(_SCREAMING_SNAKE_CASE ) )
| 109 |
import comet # From: unbabel-comet
import torch
import datasets
UpperCAmelCase__ = datasets.logging.get_logger(__name__)
UpperCAmelCase__ = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
UpperCAmelCase__ = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year\'s competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
UpperCAmelCase__ = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCamelCase__ ( datasets.Metric):
def __A (self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence''' ),
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
'''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf''',
] , )
def __A (self , UpperCAmelCase ) -> Dict:
if self.config_name == "default":
_lowercase =comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) )
else:
_lowercase =comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=False ) -> int:
if gpus is None:
_lowercase =1 if torch.cuda.is_available() else 0
_lowercase ={'''src''': sources, '''mt''': predictions, '''ref''': references}
_lowercase =[dict(zip(UpperCAmelCase , UpperCAmelCase ) ) for t in zip(*data.values() )]
_lowercase , _lowercase =self.scorer.predict(UpperCAmelCase , gpus=UpperCAmelCase , progress_bar=UpperCAmelCase )
return {"mean_score": mean_score, "scores": scores}
| 5 | 0 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
lowerCAmelCase = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class _a ( datasets.BuilderConfig ):
_lowercase : Optional[datasets.Features] = None
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
import pyspark
def generate_fn():
lowercase__ = df.select('''*''' , pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) )
for partition_id in partition_order:
lowercase__ = df_with_partition_id.select('''*''' ).where(f'part_id = {partition_id}' ).drop('''part_id''' )
lowercase__ = partition_df.collect()
lowercase__ = 0
for row in rows:
yield f'{partition_id}_{row_id}', row.asDict()
row_id += 1
return generate_fn
class _a ( _BaseExamplesIterable ):
def __init__( self: List[str] , UpperCamelCase_: "pyspark.sql.DataFrame" , UpperCamelCase_: Tuple=None , ) -> int:
"""simple docstring"""
lowercase__ = df
lowercase__ = partition_order or range(self.df.rdd.getNumPartitions() )
lowercase__ = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self: Optional[Any] ) -> List[str]:
"""simple docstring"""
yield from self.generate_examples_fn()
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: np.random.Generator ) -> "SparkExamplesIterable":
"""simple docstring"""
lowercase__ = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(UpperCamelCase_ )
return SparkExamplesIterable(self.df , partition_order=UpperCamelCase_ )
def lowerCamelCase_ ( self: str , UpperCamelCase_: int , UpperCamelCase_: int ) -> "SparkExamplesIterable":
"""simple docstring"""
lowercase__ = self.split_shard_indices_by_worker(UpperCamelCase_ , UpperCamelCase_ )
return SparkExamplesIterable(self.df , partition_order=UpperCamelCase_ )
@property
def lowerCamelCase_ ( self: int ) -> int:
"""simple docstring"""
return len(self.partition_order )
class _a ( datasets.DatasetBuilder ):
_lowercase : Tuple = SparkConfig
def __init__( self: Any , UpperCamelCase_: "pyspark.sql.DataFrame" , UpperCamelCase_: str = None , UpperCamelCase_: str = None , **UpperCamelCase_: Tuple , ) -> int:
"""simple docstring"""
import pyspark
lowercase__ = pyspark.sql.SparkSession.builder.getOrCreate()
lowercase__ = df
lowercase__ = working_dir
super().__init__(
cache_dir=UpperCamelCase_ , config_name=str(self.df.semanticHash() ) , **UpperCamelCase_ , )
def lowerCamelCase_ ( self: int ) -> List[Any]:
"""simple docstring"""
def create_cache_and_write_probe(UpperCamelCase_: Dict ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=UpperCamelCase_ )
lowercase__ = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(UpperCamelCase_ , '''a''' )
return [probe_file]
if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowercase__ = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(UpperCamelCase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' )
def lowerCamelCase_ ( self: str ) -> Optional[int]:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: datasets.download.download_manager.DownloadManager ) -> Optional[int]:
"""simple docstring"""
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: List[str] ) -> List[Any]:
"""simple docstring"""
import pyspark
def get_arrow_batch_size(UpperCamelCase_: Optional[Any] ):
for batch in it:
yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} )
lowercase__ = self.df.count()
lowercase__ = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowercase__ = (
self.df.limit(UpperCamelCase_ )
.repartition(1 )
.mapInArrow(UpperCamelCase_ , '''batch_bytes: long''' )
.agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowercase__ = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowercase__ = min(UpperCamelCase_ , int(approx_total_size / max_shard_size ) )
lowercase__ = self.df.repartition(UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: str , UpperCamelCase_: str , UpperCamelCase_: int , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
"""simple docstring"""
import pyspark
lowercase__ = ParquetWriter if file_format == '''parquet''' else ArrowWriter
lowercase__ = os.path.join(self._working_dir , os.path.basename(UpperCamelCase_ ) ) if self._working_dir else fpath
lowercase__ = file_format == '''parquet'''
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowercase__ = self.config.features
lowercase__ = self._writer_batch_size
lowercase__ = self._fs.storage_options
def write_arrow(UpperCamelCase_: Tuple ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowercase__ = pyspark.TaskContext().taskAttemptId()
lowercase__ = next(UpperCamelCase_ , UpperCamelCase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
lowercase__ = 0
lowercase__ = writer_class(
features=UpperCamelCase_ , path=working_fpath.replace('''SSSSS''' , f'{shard_id:05d}' ).replace('''TTTTT''' , f'{task_id:05d}' ) , writer_batch_size=UpperCamelCase_ , storage_options=UpperCamelCase_ , embed_local_files=UpperCamelCase_ , )
lowercase__ = pa.Table.from_batches([first_batch] )
writer.write_table(UpperCamelCase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowercase__ , lowercase__ = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
shard_id += 1
lowercase__ = writer_class(
features=writer._features , path=working_fpath.replace('''SSSSS''' , f'{shard_id:05d}' ).replace('''TTTTT''' , f'{task_id:05d}' ) , writer_batch_size=UpperCamelCase_ , storage_options=UpperCamelCase_ , embed_local_files=UpperCamelCase_ , )
lowercase__ = pa.Table.from_batches([batch] )
writer.write_table(UpperCamelCase_ )
if writer._num_bytes > 0:
lowercase__ , lowercase__ = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(UpperCamelCase_ ) ):
lowercase__ = os.path.join(os.path.dirname(UpperCamelCase_ ) , os.path.basename(UpperCamelCase_ ) )
shutil.move(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = (
self.df.mapInArrow(UpperCamelCase_ , '''task_id: long, num_examples: long, num_bytes: long''' )
.groupBy('''task_id''' )
.agg(
pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def lowerCamelCase_ ( self: Any , UpperCamelCase_: "datasets.SplitGenerator" , UpperCamelCase_: str = "arrow" , UpperCamelCase_: Optional[Union[str, int]] = None , UpperCamelCase_: Optional[int] = None , **UpperCamelCase_: Optional[int] , ) -> str:
"""simple docstring"""
self._validate_cache_dir()
lowercase__ = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(UpperCamelCase_ )
lowercase__ = not is_remote_filesystem(self._fs )
lowercase__ = os.path.join if is_local else posixpath.join
lowercase__ = '''-TTTTT-SSSSS-of-NNNNN'''
lowercase__ = f'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
lowercase__ = path_join(self._output_dir , UpperCamelCase_ )
lowercase__ = 0
lowercase__ = 0
lowercase__ = 0
lowercase__ = []
lowercase__ = []
for task_id, content in self._prepare_split_single(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
num_examples , num_bytes , num_shards , shard_lengths = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(shard_lengths )
lowercase__ = total_num_examples
lowercase__ = total_num_bytes
# should rename everything at the end
logger.debug(f'Renaming {total_shards} shards.' )
if total_shards > 1:
lowercase__ = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowercase__ = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int , ):
rename(
UpperCamelCase_ , fpath.replace('''SSSSS''' , f'{shard_id:05d}' ).replace('''TTTTT''' , f'{task_id:05d}' ) , fpath.replace('''TTTTT-SSSSS''' , f'{global_shard_id:05d}' ).replace('''NNNNN''' , f'{total_shards:05d}' ) , )
lowercase__ = []
lowercase__ = 0
for i in range(len(UpperCamelCase_ ) ):
lowercase__ , lowercase__ = task_id_and_num_shards[i]
for shard_id in range(UpperCamelCase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(UpperCamelCase_ , len(UpperCamelCase_ ) ).map(lambda UpperCamelCase_ : _rename_shard(*UpperCamelCase_ ) ).collect()
else:
# don't use any pattern
lowercase__ = 0
lowercase__ = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('''SSSSS''' , f'{shard_id:05d}' ).replace('''TTTTT''' , f'{task_id:05d}' ) , fpath.replace(UpperCamelCase_ , '''''' ) , )
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: "datasets.SplitGenerator" , ) -> SparkExamplesIterable:
"""simple docstring"""
return SparkExamplesIterable(self.df )
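
# Usage sketch (added for illustration; not part of the original module). This
# builder is what backs `Dataset.from_spark`; everything except that entry point
# is an assumption about the caller's environment.
#
#   import pyspark
#   from datasets import Dataset
#
#   spark = pyspark.sql.SparkSession.builder.master("local[*]").getOrCreate()
#   df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#   ds = Dataset.from_spark(df)  # materializes the DataFrame as an Arrow-backed dataset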
| 110 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """
    The variance-preserving (VP) stochastic differential equation scheduler.
    """

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
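
# Minimal self-contained check (added for illustration; not part of the original
# scheduler). For the VP SDE, the marginal perturbation kernel has
# std(t) = sqrt(1 - exp(2 * log_mean_coeff(t))) with
# log_mean_coeff(t) = -0.25 * t**2 * (beta_max - beta_min) - 0.5 * t * beta_min,
# which is exactly what `step_pred` recomputes above:
#
#   import math
#   beta_min, beta_max, t = 0.1, 20.0, 0.5
#   log_mean_coeff = -0.25 * t**2 * (beta_max - beta_min) - 0.5 * t * beta_min
#   std = math.sqrt(1.0 - math.exp(2.0 * log_mean_coeff))
#   print(std)  # ~0.96 for these values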
| 5 | 0 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        # We test on the dev set to compare to benchmarks without facing test set sluggishness
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--task", default="", type=str, required=True, help="The GLUE task to run")
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )

        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)


if __name__ == "__main__":
    main()
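
# Example invocation (added for illustration; the script name, data path and
# output path are placeholders, and GLUE data must already be downloaded):
#
#   python run_pl_glue.py \
#       --model_name_or_path bert-base-cased \
#       --task mrpc \
#       --data_dir /path/to/glue/MRPC \
#       --output_dir ./results/mrpc \
#       --do_train --do_predict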
| 322 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights to our MobileViT structure.
    """
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(checkpoint, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
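
# Example invocation (added for illustration; the script name and checkpoint path
# are placeholders for the original MobileViT weights file):
#
#   python convert_mlcvnets_to_pytorch.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small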
| 5 | 0 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a pair of `DataLoader`s for the GLUE MRPC dataset,
    using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
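
# Example launches (added for illustration):
#
#   python nlp_example.py                                     # single process, CPU or one GPU
#   accelerate launch nlp_example.py --mixed_precision fp16   # distributed, via the accelerate CLI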
| 248 |
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """
    Return a dict of the worldwide COVID-19 headline counters scraped from
    worldometers.info (total cases, deaths, recoveries, ...).
    """
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
    for key, value in world_covid19_stats().items():
print(f'''{key}\n{value}\n''')
| 5 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    """
    A "fast" BARThez tokenizer, backed by HuggingFace's tokenizers library.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
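
# Usage sketch (added for illustration; downloading the checkpoint from the Hub
# requires network access):
#
#   from transformers import BarthezTokenizerFast
#
#   tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
#   print(tokenizer("Un exemple en français.").input_ids)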
| 184 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
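
# Behavior sketch (added for illustration): thanks to `_LazyModule`, an import
# such as the one below only loads the `config` submodule on first attribute
# access, keeping `import transformers` itself cheap:
#
#   from transformers.onnx import OnnxConfig  # triggers the lazy submodule import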
| 5 | 0 |
from manim import *
class BigModelInferenceScene(Scene):  # NOTE: illustrative class name; the original name is not recoverable from this copy
    def construct(self) -> None:
SCREAMING_SNAKE_CASE : Any = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE : str = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE : Tuple = Rectangle(height=0.2_5 , width=0.2_5 )
SCREAMING_SNAKE_CASE : int = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : List[Any] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : int = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE : Any = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE : Dict = VGroup(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE : Any = Text('''CPU''' , font_size=24 )
SCREAMING_SNAKE_CASE : str = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE : Optional[int] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE : List[str] = Text('''GPU''' , font_size=24 )
SCREAMING_SNAKE_CASE : Any = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Dict = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE : Tuple = Text('''Model''' , font_size=24 )
SCREAMING_SNAKE_CASE : List[Any] = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.add(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = []
SCREAMING_SNAKE_CASE : Optional[Any] = []
for i, rect in enumerate(_lowerCamelCase ):
SCREAMING_SNAKE_CASE : int = fill.copy().set_fill(_lowerCamelCase , opacity=0.8 )
target.move_to(_lowerCamelCase )
model_arr.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(_lowerCamelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_lowerCamelCase )
self.add(*_lowerCamelCase , *_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Tuple = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : List[str] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE : Tuple = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = VGroup(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Text('''Disk''' , font_size=24 )
SCREAMING_SNAKE_CASE : str = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
disk.move_to([-4, -1.2_5, 0] )
self.add(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE : List[Any] = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(_lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = MarkupText(
F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Tuple = Square(0.3 )
input.set_fill(_lowerCamelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , _lowerCamelCase , buff=0.5 )
self.play(Write(_lowerCamelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=_lowerCamelCase , buff=0.0_2 )
self.play(MoveToTarget(_lowerCamelCase ) )
self.play(FadeOut(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Optional[int] = Arrow(start=_lowerCamelCase , end=_lowerCamelCase , color=_lowerCamelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , _lowerCamelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
SCREAMING_SNAKE_CASE : Dict = MarkupText(
F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCamelCase , run_time=3 ) )
SCREAMING_SNAKE_CASE : str = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.0_2}
self.play(
Write(_lowerCamelCase ) , Circumscribe(model_arr[0] , color=_lowerCamelCase , **_lowerCamelCase ) , Circumscribe(model_cpu_arr[0] , color=_lowerCamelCase , **_lowerCamelCase ) , Circumscribe(gpu_rect[0] , color=_lowerCamelCase , **_lowerCamelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
SCREAMING_SNAKE_CASE : List[str] = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.0_2 , _lowerCamelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.0_2 )
SCREAMING_SNAKE_CASE : str = AnimationGroup(
FadeOut(_lowerCamelCase , run_time=0.5 ) , MoveToTarget(_lowerCamelCase , run_time=0.5 ) , FadeIn(_lowerCamelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(_lowerCamelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
SCREAMING_SNAKE_CASE : List[Any] = 0.7
self.play(
Circumscribe(model_arr[i] , **_lowerCamelCase ) , Circumscribe(cpu_left_col_base[i] , **_lowerCamelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_lowerCamelCase , **_lowerCamelCase ) , Circumscribe(gpu_rect[0] , color=_lowerCamelCase , **_lowerCamelCase ) , Circumscribe(model_arr[i + 1] , color=_lowerCamelCase , **_lowerCamelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=_lowerCamelCase , **_lowerCamelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_lowerCamelCase , **_lowerCamelCase ) , Circumscribe(gpu_rect[0] , color=_lowerCamelCase , **_lowerCamelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
SCREAMING_SNAKE_CASE : Tuple = a_c
SCREAMING_SNAKE_CASE : Union[str, Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 )
self.play(
FadeOut(_lowerCamelCase ) , FadeOut(_lowerCamelCase , run_time=0.5 ) , )
SCREAMING_SNAKE_CASE : str = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCamelCase , run_time=3 ) , MoveToTarget(_lowerCamelCase ) )
self.wait()
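
# To render the scene (added for illustration; assumes the manim CLI is installed
# and that this file is saved as, e.g., big_model_inference.py):
#
#   manim -pql big_model_inference.py BigModelInferenceScene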
| 313 |
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: returns the longest palindromic substring in O(n).

    >>> palindromic_string('abbbaba')
    'abbba'
    >>> palindromic_string('ababa')
    'ababa'
    """
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
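    # Worked example (added for illustration): the longest palindromic substring
    # of "forgeeksskeegfor" is "geeksskeeg".
    print(palindromic_string("forgeeksskeegfor"))  # geeksskeeg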
| 5 | 0 |
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient (v* A v) / (v* v) of a Hermitian matrix A."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"""{a} is not hermitian."""
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"""{a} is not hermitian."""
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
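    # Property check (added for illustration): for a Hermitian matrix, the
    # Rayleigh quotient is bounded by the extreme eigenvalues.
    a = np.array([[2.0, 1.0], [1.0, 2.0]])
    v = np.array([[1.0], [1.0]])
    r = rayleigh_quotient(a, v)  # equals 3 here
    eigenvalues = np.linalg.eigvalsh(a)
    assert eigenvalues.min() <= r <= eigenvalues.max()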
| 51 |
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Returns all primes below max_number, via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Counts the composites below max_number with exactly two prime factors."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
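    # Worked example (added for illustration): below 30 the composites with
    # exactly two prime factors are 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26,
    # so solution(30) == 10.
    assert solution(30) == 10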
| 5 | 0 |
def encrypt(input_string: str, key: int) -> str:
    """Encrypts a string with the rail-fence (zigzag) cipher."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Decrypts a rail-fence ciphertext produced with the same key."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Tries every key from 1 to len(input_string) - 1 and returns all decryptions."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
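    # Classic worked example (added for illustration): with 3 rails,
    # "WEAREDISCOVEREDFLEEATONCE" becomes "WECRLTEERDSOEEFEAOCAIVDEN",
    # and decrypt() round-trips it.
    ciphertext = encrypt("WEAREDISCOVEREDFLEEATONCE", 3)
    assert ciphertext == "WECRLTEERDSOEEFEAOCAIVDEN"
    assert decrypt(ciphertext, 3) == "WEAREDISCOVEREDFLEEATONCE"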
| 73 |
# fmt: off
MORSE_CODE_DICT = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """Converts an English message into Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Converts a Morse code message back into English."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
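    # Quick sanity check (added for illustration): "SOS" is the well-known
    # "... --- ...".
    assert encrypt("SOS") == "... --- ..."
    assert decrypt("... --- ...") == "SOS"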
| 5 | 0 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """
    Utility class for storing learned text embeddings for classifier-free sampling.
    """

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using VQ Diffusion.
    """

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate( self , log_p_x_0 : torch.FloatTensor , truncation_rate : float ) ->torch.FloatTensor:
        sorted_log_p_x_0 , indices = torch.sort(log_p_x_0 , 1 , descending=True )
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0 )
        keep_mask = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :] , True )
        keep_mask = torch.cat((all_true, keep_mask) , dim=1 )
        keep_mask = keep_mask[:, :-1, :]
        keep_mask = keep_mask.gather(1 , indices.argsort(1 ) )
        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf # -inf = log(0)
        return rv
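# The `truncate` method above implements truncation sampling: token log-probabilities
# are sorted, everything outside the smallest set whose cumulative mass reaches
# `truncation_rate` is set to log(0) = -inf, and the most likely token always
# survives. A minimal standalone sketch of the same idea on a toy 2-D distribution
# (illustrative only, not part of the pipeline):
import torch

_log_p = torch.log(torch.tensor([[0.5, 0.3, 0.15, 0.05]]))  # (batch, vocab)
_sorted_p, _indices = torch.sort(_log_p.exp(), 1, descending=True)
_keep = _sorted_p.cumsum(dim=1) < 0.9
_keep = torch.cat((torch.ones_like(_keep[:, :1]), _keep), dim=1)[:, :-1]  # keep argmax
_keep = _keep.gather(1, _indices.argsort(1))  # undo the sort
_truncated = _log_p.clone()
_truncated[~_keep] = -torch.inf  # only the 0.05 tail is dropped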
| 142 |
from typing import Any
def viterbi( observations_space: list , states_space: list , initial_probabilities: dict , transition_probabilities: dict , emission_probabilities: dict , ) -> list:
    """simple docstring"""
    _validation(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1 , len(observations_space ) ):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space ) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space ) - 1 , -1 , -1 ):
        result.append(previous )
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation( observations_space: Any , states_space: Any , initial_probabilities: Any , transition_probabilities: Any , emission_probabilities: Any , ) -> None:
    """simple docstring"""
    _validate_not_empty(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    _validate_lists(observations_space , states_space )
    _validate_dicts(
        initial_probabilities , transition_probabilities , emission_probabilities )
def _validate_not_empty( observations_space: Any , states_space: Any , initial_probabilities: Any , transition_probabilities: Any , emission_probabilities: Any , ) -> None:
    """simple docstring"""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError('''There\'s an empty parameter''' )
def _validate_lists( observations_space: Any , states_space: Any ) -> None:
    """simple docstring"""
    _validate_list(observations_space , '''observations_space''' )
    _validate_list(states_space , '''states_space''' )
def _validate_list( _object: Any , var_name: str ) -> None:
    """simple docstring"""
    if not isinstance(_object , list ):
        msg = F"{var_name} must be a list"
        raise ValueError(msg )
    else:
        for x in _object:
            if not isinstance(x , str ):
                msg = F"{var_name} must be a list of strings"
                raise ValueError(msg )
def _validate_dicts( initial_probabilities: Any , transition_probabilities: Any , emission_probabilities: Any , ) -> None:
    """simple docstring"""
    _validate_dict(initial_probabilities , '''initial_probabilities''' , float )
    _validate_nested_dict(transition_probabilities , '''transition_probabilities''' )
    _validate_nested_dict(emission_probabilities , '''emission_probabilities''' )
def _validate_nested_dict( _object: Any , var_name: str ) -> None:
    """simple docstring"""
    _validate_dict(_object , var_name , dict )
    for x in _object.values():
        _validate_dict(x , var_name , float , True )
def _validate_dict( _object: Any , var_name: str , value_type: type , nested: bool = False ) -> None:
    """simple docstring"""
    if not isinstance(_object , dict ):
        msg = F"{var_name} must be a dict"
        raise ValueError(msg )
    if not all(isinstance(x , str ) for x in _object ):
        msg = F"{var_name} all keys must be strings"
        raise ValueError(msg )
    if not all(isinstance(x , value_type ) for x in _object.values() ):
        nested_text = '''nested dictionary ''' if nested else ""
        msg = F"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg )
if __name__ == "__main__":
from doctest import testmod
testmod()
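# Worked example for the `viterbi` decoder above, using the classic healthy/fever
# HMM (the `example_*` names are illustrative). With these inputs the most likely
# hidden sequence is ["Healthy", "Healthy", "Fever"]:
example_observations = ["normal", "cold", "dizzy"]
example_states = ["Healthy", "Fever"]
example_start_p = {"Healthy": 0.6, "Fever": 0.4}
example_trans_p = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
example_emit_p = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
assert viterbi(
    example_observations, example_states, example_start_p, example_trans_p, example_emit_p
) == ["Healthy", "Healthy", "Fever"]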
| 5 | 0 |
'''simple docstring'''
from math import isqrt
def calculate_prime_numbers( max_number: int ) -> list[int]:
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2, max_number, i ):
                is_prime[j] = False
    return [i for i in range(2, max_number ) if is_prime[i]]
def solution( max_number: int = 10**8 ) -> int:
    prime_numbers = calculate_prime_numbers(max_number // 2 )
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(f'{solution() = }') | 145 |
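# Quick sanity check for the two-pointer count above (Project Euler 187: composites
# below a limit with exactly two, not necessarily distinct, prime factors). Below 30
# these are 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26, so:
# solution(30) == 10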
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
# TODO Update this
UpperCAmelCase__ = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class lowerCamelCase__ ( PretrainedConfig):
SCREAMING_SNAKE_CASE__ = '''esm'''
def __init__(self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=7_6_8 , UpperCAmelCase=1_2 , UpperCAmelCase=1_2 , UpperCAmelCase=3_0_7_2 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=1_0_2_6 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-12 , UpperCAmelCase="absolute" , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase , ) -> Tuple:
super().__init__(pad_token_id=UpperCAmelCase , mask_token_id=UpperCAmelCase , **UpperCAmelCase )
_lowercase =vocab_size
_lowercase =hidden_size
_lowercase =num_hidden_layers
_lowercase =num_attention_heads
_lowercase =intermediate_size
_lowercase =hidden_dropout_prob
_lowercase =attention_probs_dropout_prob
_lowercase =max_position_embeddings
_lowercase =initializer_range
_lowercase =layer_norm_eps
_lowercase =position_embedding_type
_lowercase =use_cache
_lowercase =emb_layer_norm_before
_lowercase =token_dropout
_lowercase =is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
_lowercase =EsmFoldConfig()
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
_lowercase =EsmFoldConfig(**UpperCAmelCase )
_lowercase =esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
_lowercase =get_default_vocab_list()
else:
_lowercase =vocab_list
else:
_lowercase =None
_lowercase =None
        if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , False ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __A (self ) -> List[str]:
_lowercase =super().to_dict()
if isinstance(self.esmfold_config , UpperCAmelCase ):
_lowercase =self.esmfold_config.to_dict()
return output
@dataclass
class EsmFoldConfig :
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = 128
SCREAMING_SNAKE_CASE__ = None
def __A (self ) -> Union[str, Any]:
if self.trunk is None:
_lowercase =TrunkConfig()
elif isinstance(self.trunk , UpperCAmelCase ):
_lowercase =TrunkConfig(**self.trunk )
def __A (self ) -> Tuple:
_lowercase =asdict(self )
_lowercase =self.trunk.to_dict()
return output
@dataclass
class TrunkConfig :
SCREAMING_SNAKE_CASE__ = 48
SCREAMING_SNAKE_CASE__ = 1024
SCREAMING_SNAKE_CASE__ = 128
SCREAMING_SNAKE_CASE__ = 32
SCREAMING_SNAKE_CASE__ = 32
SCREAMING_SNAKE_CASE__ = 32
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = 4
SCREAMING_SNAKE_CASE__ = 128
SCREAMING_SNAKE_CASE__ = None
def __A (self ) -> List[str]:
if self.structure_module is None:
_lowercase =StructureModuleConfig()
elif isinstance(self.structure_module , UpperCAmelCase ):
_lowercase =StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}." )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                f" {self.sequence_state_dim} and {self.sequence_head_width}." )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got'''
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got'''
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}." )
def __A (self ) -> Dict:
_lowercase =asdict(self )
_lowercase =self.structure_module.to_dict()
return output
@dataclass
class StructureModuleConfig :
SCREAMING_SNAKE_CASE__ = 384
SCREAMING_SNAKE_CASE__ = 128
SCREAMING_SNAKE_CASE__ = 16
SCREAMING_SNAKE_CASE__ = 128
SCREAMING_SNAKE_CASE__ = 12
SCREAMING_SNAKE_CASE__ = 4
SCREAMING_SNAKE_CASE__ = 8
SCREAMING_SNAKE_CASE__ = 0.1
SCREAMING_SNAKE_CASE__ = 8
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = 7
SCREAMING_SNAKE_CASE__ = 10
SCREAMING_SNAKE_CASE__ = 1E-8
SCREAMING_SNAKE_CASE__ = 1E5
def __A (self ) -> List[Any]:
return asdict(self )
def get_default_vocab_list ( ) -> Tuple:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
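# The trunk validation above ties the state dimensions to attention head counts:
# with the dataclass defaults (sequence_state_dim=1024, sequence_head_width=32,
# pairwise_state_dim=128, pairwise_head_width=32) the trunk runs 32 sequence heads
# and 4 pairwise heads. A hypothetical check against `TrunkConfig` above:
#
# trunk = TrunkConfig()
# assert trunk.sequence_state_dim // trunk.sequence_head_width == 32
# assert trunk.pairwise_state_dim // trunk.pairwise_head_width == 4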
| 5 | 0 |
'''simple docstring'''
def solution( ) -> str:
    '''simple docstring'''
    total = 0
    for i in range(1 , 10_01 ):
        total += i**i
    return str(total )[-10:]
if __name__ == "__main__":
print(solution())
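# Sanity check on a smaller bound: 1^1 + 2^2 + ... + 10^10 = 10405071317, so the
# last ten digits of the first ten terms are "0405071317":
assert str(sum(i**i for i in range(1, 11)))[-10:] == "0405071317"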
| 120 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
class TokenizedDataset( IterableDataset):
def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=1 ) -> Dict:
_lowercase =tokenizer
_lowercase =dataset
_lowercase =len(UpperCAmelCase ) if n_tasks is None else n_tasks
_lowercase =n_copies
def __iter__(self ) -> Optional[Any]:
_lowercase =[]
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
_lowercase =self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class EndOfFunctionCriteria( StoppingCriteria):
def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
_lowercase =start_length
_lowercase =eof_strings
_lowercase =tokenizer
def __call__(self , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Dict:
_lowercase =self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
_lowercase =[]
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(UpperCAmelCase )
def remove_last_block( string ) -> str:
    """simple docstring"""
    string_list = re.split('''(%s)''' % '''|'''.join(EOF_STRINGS ) , string )
    # last string should be ""
    return "".join(string_list[:-2] )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=20 , **__snake_case ) -> Tuple:
"""simple docstring"""
_lowercase =defaultdict(__snake_case ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__snake_case ) ):
with torch.no_grad():
_lowercase =batch['''ids'''].shape[-1]
_lowercase =accelerator.unwrap_model(__snake_case ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=__snake_case , **__snake_case )
# each task is generated batch_size times
_lowercase =batch['''task_id'''].repeat(__snake_case )
_lowercase =accelerator.pad_across_processes(
__snake_case , dim=1 , pad_index=tokenizer.pad_token_id )
_lowercase , _lowercase =accelerator.gather((generated_tokens, generated_tasks) )
_lowercase =generated_tokens.cpu().numpy()
_lowercase =generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__snake_case , __snake_case ):
gen_token_dict[task].append(__snake_case )
_lowercase =[[] for _ in range(__snake_case )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
_lowercase =tokenizer.decode(__snake_case , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case )
code_gens[task].append(remove_last_block(__snake_case ) )
return code_gens
def UpperCAmelCase_ ( ) -> str:
"""simple docstring"""
_lowercase =HfArgumentParser(__snake_case )
_lowercase =parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
_lowercase =args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
_lowercase ='''false'''
if args.num_workers is None:
_lowercase =multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
_lowercase =Accelerator()
set_seed(args.seed , device_specific=__snake_case )
# Load model and tokenizer
_lowercase =AutoTokenizer.from_pretrained(args.model_ckpt )
_lowercase =tokenizer.eos_token
_lowercase =AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
_lowercase ={
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , __snake_case , __snake_case )] ),
}
# Load evaluation dataset and metric
_lowercase =load_dataset('''openai_humaneval''' )
_lowercase =load_metric('''code_eval''' )
_lowercase =args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
_lowercase =args.n_samples // args.batch_size
_lowercase =TokenizedDataset(__snake_case , human_eval['''test'''] , n_copies=__snake_case , n_tasks=__snake_case )
# do not confuse args.batch_size, which is actually the num_return_sequences
_lowercase =DataLoader(__snake_case , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
_lowercase =code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
_lowercase , _lowercase =accelerator.prepare(__snake_case , __snake_case )
_lowercase =complete_code(
__snake_case , __snake_case , __snake_case , __snake_case , n_tasks=__snake_case , batch_size=args.batch_size , **__snake_case , )
if accelerator.is_main_process:
_lowercase =[]
for task in tqdm(range(__snake_case ) ):
_lowercase =human_eval['''test'''][task]['''test''']
_lowercase =F"check({human_eval['test'][task]['entry_point']})"
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
_lowercase , _lowercase =code_eval_metric.compute(
references=__snake_case , predictions=__snake_case , num_workers=args.num_workers )
print(F"Results: {pass_at_k}" )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(__snake_case , __snake_case )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
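# How `remove_last_block` trims a generation: re.split with a capturing group keeps
# the delimiters, so dropping the last two list entries removes the final stop token
# and everything after it. Standalone illustration with the EOF_STRINGS above:
_completion = "def f():\n    return 1\n\ndef g():\n    pass"
_parts = re.split("(%s)" % "|".join(EOF_STRINGS), _completion)
# _parts == ['def f():\n    return 1\n', '\ndef', ' g():\n    pass']
assert "".join(_parts[:-2]) == "def f():\n    return 1\n"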
| 5 | 0 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = math.inf , SCREAMING_SNAKE_CASE__ = -math.inf , SCREAMING_SNAKE_CASE__ = math.inf , SCREAMING_SNAKE_CASE__ = -math.inf , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = 100 , SCREAMING_SNAKE_CASE__ = 0.01 , SCREAMING_SNAKE_CASE__ = 1 , ) -> Any:
__snake_case: Dict = False
__snake_case: Optional[Any] = search_prob
__snake_case: Union[str, Any] = start_temperate
__snake_case: Dict = []
__snake_case: Optional[int] = 0
__snake_case: List[str] = None
while not search_end:
__snake_case: Any = current_state.score()
if best_state is None or current_score > best_state.score():
__snake_case: Tuple = current_state
scores.append(__snake_case)
iterations += 1
__snake_case: Optional[Any] = None
__snake_case: List[str] = current_state.get_neighbors()
while (
next_state is None and neighbors
        ):  # until we find a neighbor we can move to
__snake_case: Optional[int] = random.randint(0 , len(__snake_case) - 1) # picking a random neighbor
__snake_case: Optional[Any] = neighbors.pop(__snake_case)
__snake_case: List[Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
__snake_case: Optional[int] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
__snake_case: Any = picked_neighbor
else:
__snake_case: Any = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
__snake_case: Tuple = picked_neighbor
__snake_case: Dict = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
__snake_case: Optional[Any] = True
else:
__snake_case: List[str] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(__snake_case) , __snake_case)
plt.xlabel("""Iterations""")
plt.ylabel("""Function values""")
plt.show()
return best_state
if __name__ == "__main__":
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> Optional[Any]:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
__UpperCAmelCase : List[Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__UpperCAmelCase : Optional[Any] = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
f'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
)
# starting the problem with initial coordinates (12, 47)
__UpperCAmelCase : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__UpperCAmelCase : int = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
f'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
)
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> List[str]:
return (3 * x**2) - (6 * y)
__UpperCAmelCase : Optional[Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__UpperCAmelCase : Optional[Any] = simulated_annealing(prob, find_max=False, visualization=True)
print(
"The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
f'{local_min.score()}'
)
__UpperCAmelCase : int = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__UpperCAmelCase : Optional[int] = simulated_annealing(prob, find_max=True, visualization=True)
print(
"The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
f'{local_min.score()}'
)
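# The acceptance rule above is the Metropolis criterion: a worse neighbor is still
# taken with probability e^(change / T), so at high temperature the search roams and
# as T decays it behaves like plain hill climbing. For a score drop of 10:
assert round(math.e ** (-10 / 100), 2) == 0.9  # T = 100: accepted ~90% of the time
assert math.e ** (-10 / 1) < 1e-4              # T = 1: essentially never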
| 111 |
UpperCAmelCase__ = 8.31_44_62 # Unit - J mol-1 K-1
def pressure_of_gas_system ( moles: float , kelvin: float , volume: float ) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system ( moles: float , kelvin: float , pressure: float ) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
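# Worked example: one mole of an ideal gas at 273.15 K confined to 22.4 L
# (0.0224 m^3) sits at roughly one atmosphere, since P = nRT/V ≈ 1.014e5 Pa.
# (The function names above were de-mangled here; originally both defs shared a name.)
assert 1.01e5 < pressure_of_gas_system(1, 273.15, 0.0224) < 1.02e5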
| 5 | 0 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(
UpperCamelCase__ , R"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class __lowerCAmelCase ( Pipeline):
def _lowercase ( self , lowerCAmelCase__ ) -> np.ndarray:
'''simple docstring'''
if self.framework == "tf":
a__ : Optional[Any] =tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
a__ : Dict =torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=lowerCAmelCase__ )
else:
raise ValueError("Unsupported framework" )
return masked_index
def _lowercase ( self , lowerCAmelCase__ ) -> np.ndarray:
'''simple docstring'''
a__ : int =self.get_masked_index(lowerCAmelCase__ )
a__ : Dict =np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , F'''No mask_token ({self.tokenizer.mask_token}) found on the input''' , )
def _lowercase ( self , lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(lowerCAmelCase__ )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Dict[str, GenericTensor]:
'''simple docstring'''
if return_tensors is None:
a__ : Optional[int] =self.framework
a__ : Optional[Any] =self.tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
self.ensure_exactly_one_mask_token(lowerCAmelCase__ )
return model_inputs
def _lowercase ( self , lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
a__ : Tuple =self.model(**lowerCAmelCase__ )
a__ : Optional[int] =model_inputs["input_ids"]
return model_outputs
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__=5 , lowerCAmelCase__=None ) -> Tuple:
'''simple docstring'''
if target_ids is not None and target_ids.shape[0] < top_k:
a__ : Optional[Any] =target_ids.shape[0]
a__ : Tuple =model_outputs["input_ids"][0]
a__ : Union[str, Any] =model_outputs["logits"]
if self.framework == "tf":
a__ : Optional[Any] =tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
a__ : Any =outputs.numpy()
a__ : List[Any] =outputs[0, masked_index, :]
a__ : Tuple =stable_softmax(lowerCAmelCase__ , axis=-1 )
if target_ids is not None:
a__ : Optional[Any] =tf.gather_nd(tf.squeeze(lowerCAmelCase__ , 0 ) , target_ids.reshape(-1 , 1 ) )
a__ : str =tf.expand_dims(lowerCAmelCase__ , 0 )
a__ : str =tf.math.top_k(lowerCAmelCase__ , k=lowerCAmelCase__ )
a__ , a__ : Union[str, Any] =topk.values.numpy(), topk.indices.numpy()
else:
a__ : Optional[int] =torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=lowerCAmelCase__ ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
a__ : Tuple =outputs[0, masked_index, :]
a__ : List[str] =logits.softmax(dim=-1 )
if target_ids is not None:
a__ : Dict =probs[..., target_ids]
a__ , a__ : Any =probs.topk(lowerCAmelCase__ )
a__ : List[Any] =[]
a__ : Union[str, Any] =values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
a__ : Optional[Any] =[]
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
a__ : Optional[Any] =input_ids.numpy().copy()
if target_ids is not None:
a__ : Any =target_ids[p].tolist()
a__ : Tuple =p
# Filter padding out:
a__ : Optional[Any] =tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
a__ : int =self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
a__ : Tuple ={"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
row.append(lowerCAmelCase__ )
result.append(lowerCAmelCase__ )
if single_mask:
return result[0]
return result
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__=None ) -> List[str]:
'''simple docstring'''
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a__ : Optional[Any] =[targets]
try:
a__ : int =self.tokenizer.get_vocab()
except Exception:
a__ : str ={}
a__ : Optional[Any] =[]
for target in targets:
a__ : List[str] =vocab.get(lowerCAmelCase__ , lowerCAmelCase__ )
if id_ is None:
a__ : Tuple =self.tokenizer(
lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , max_length=1 , truncation=lowerCAmelCase__ , )["input_ids"]
if len(lowerCAmelCase__ ) == 0:
logger.warning(
F'''The specified target token `{target}` does not exist in the model vocabulary. '''
"We cannot replace it with anything meaningful, ignoring it" )
continue
a__ : Optional[Any] =input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F'''The specified target token `{target}` does not exist in the model vocabulary. '''
F'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.''' )
target_ids.append(id_ )
a__ : List[str] =list(set(lowerCAmelCase__ ) )
if len(lowerCAmelCase__ ) == 0:
raise ValueError("At least one target must be provided when passed." )
a__ : Optional[int] =np.array(lowerCAmelCase__ )
return target_ids
def _lowercase ( self , lowerCAmelCase__=None , lowerCAmelCase__=None ) -> List[str]:
'''simple docstring'''
a__ : Tuple ={}
if targets is not None:
a__ : Optional[int] =self.get_target_ids(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : List[str] =target_ids
if top_k is not None:
a__ : int =top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." )
return {}, {}, postprocess_params
def __call__( self , lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
a__ : Any =super().__call__(lowerCAmelCase__ , **lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) == 1:
return outputs[0]
return outputs
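# Typical use of this pipeline goes through the `pipeline` factory; the model name
# and outputs below are illustrative:
#
# from transformers import pipeline
# unmasker = pipeline("fill-mask", model="bert-base-uncased")
# unmasker("Paris is the [MASK] of France.", top_k=2, targets=["capital", "heart"])
# # -> [{"score": ..., "token": ..., "token_str": "capital",
# #      "sequence": "paris is the capital of france."}, ...]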
| 95 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve( matrix: Matrix , vector: Matrix ) -> Matrix:
    """simple docstring"""
    size: int = len(matrix )
    augmented: Matrix = [[0 for _ in range(size + 1 )] for _ in range(size )]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col] ), row2) for row2 in range(row , size ) )[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row] , augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1 , size ):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1 , size + 1 ):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1 , size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col , size + 1 ):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
    ]
def interpolate( data_points: list[int] ) -> Callable[[int], int]:
    """simple docstring"""
    size: int = len(data_points )
    matrix: Matrix = [[0 for _ in range(size )] for _ in range(size )]
    vector: Matrix = [[0] for _ in range(size )]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(data_points ):
        for col in range(size ):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix , vector )
    def interpolated_func(var: int ) -> int:
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )
    return interpolated_func
def question_function( variable: int ) -> int:
    """simple docstring"""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution( func: Callable[[int], int] = question_function , order: int = 10 ) -> int:
    """simple docstring"""
    data_points: list[int] = [func(x_val ) for x_val in range(1 , order + 1 )]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
    return ret
print(f'''{solution() = }''')
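# Worked check from the Project Euler 101 statement: for u(n) = n^3 the best
# degree-1/2/3 fits first go wrong at OP(1,2) = 1, OP(2,3) = 15 and OP(3,4) = 58,
# so the sum of the first incorrect terms is 1 + 15 + 58 = 74:
assert solution(lambda n: n**3, 3) == 74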
| 5 | 0 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
_a = {
'''169M''': 1_2,
'''430M''': 2_4,
'''1B5''': 2_4,
'''3B''': 3_2,
'''7B''': 3_2,
'''14B''': 4_0,
}
_a = {
'''169M''': 7_6_8,
'''430M''': 1_0_2_4,
'''1B5''': 2_0_4_8,
'''3B''': 2_5_6_0,
'''7B''': 4_0_9_6,
'''14B''': 5_1_2_0,
}
def _a ( SCREAMING_SNAKE_CASE : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase: int = list(state_dict.keys() )
for name in state_dict_keys:
__lowerCAmelCase: Union[str, Any] = state_dict.pop(__snake_case )
# emb -> embedding
if name.startswith('emb.' ):
__lowerCAmelCase: Optional[int] = name.replace('emb.' , 'embeddings.' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('blocks.0.ln0' ):
__lowerCAmelCase: List[Any] = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
# att -> attention
__lowerCAmelCase: Optional[int] = re.sub(R'blocks\.(\d+)\.att' , R'blocks.\1.attention' , __snake_case )
# ffn -> feed_forward
__lowerCAmelCase: Optional[Any] = re.sub(R'blocks\.(\d+)\.ffn' , R'blocks.\1.feed_forward' , __snake_case )
# time_mix_k -> time_mix_key and reshape
if name.endswith('.time_mix_k' ):
__lowerCAmelCase: int = name.replace('.time_mix_k' , '.time_mix_key' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('.time_mix_v' ):
__lowerCAmelCase: Optional[Any] = name.replace('.time_mix_v' , '.time_mix_value' )
    # time_mix_r -> time_mix_receptance
if name.endswith('.time_mix_r' ):
__lowerCAmelCase: str = name.replace('.time_mix_r' , '.time_mix_receptance' )
if name != "head.weight":
__lowerCAmelCase: Any = 'rwkv.' + name
__lowerCAmelCase: List[Any] = weight
return state_dict
def _a ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : Tuple=None , SCREAMING_SNAKE_CASE : Optional[Any]=False , SCREAMING_SNAKE_CASE : Any=None ) -> int:
"""simple docstring"""
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
__lowerCAmelCase: Optional[Any] = 5_02_77
__lowerCAmelCase: Optional[Any] = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
else:
__lowerCAmelCase: Optional[int] = PreTrainedTokenizerFast(tokenizer_file=__snake_case )
__lowerCAmelCase: Optional[Any] = len(__snake_case )
tokenizer.save_pretrained(__snake_case )
# 2. Build the config
__lowerCAmelCase: Union[str, Any] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
__lowerCAmelCase: List[str] = candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
if size not in possible_sizes:
raise ValueError(f'''`size` should be one of {possible_sizes}, got {size}.''' )
__lowerCAmelCase: Union[str, Any] = RwkvConfig(
vocab_size=__snake_case , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__snake_case )
# 3. Download model file then convert state_dict
__lowerCAmelCase: Optional[int] = hf_hub_download(__snake_case , __snake_case )
__lowerCAmelCase: Optional[Any] = torch.load(__snake_case , map_location='cpu' )
__lowerCAmelCase: Optional[int] = convert_state_dict(__snake_case )
# 4. Split in shards and save
__lowerCAmelCase , __lowerCAmelCase: List[Any] = shard_checkpoint(__snake_case )
for shard_file, shard in shards.items():
torch.save(__snake_case , os.path.join(__snake_case , __snake_case ) )
if index is not None:
__lowerCAmelCase: Tuple = os.path.join(__snake_case , __snake_case )
# Save the index as well
with open(__snake_case , 'w' , encoding='utf-8' ) as f:
__lowerCAmelCase: Optional[int] = json.dumps(__snake_case , indent=2 , sort_keys=__snake_case ) + '\n'
f.write(__snake_case )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
        'Cleaning up shards. This may fail with an OOM error; if this is the case don\'t worry, you still have converted the model.' )
__lowerCAmelCase: int = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
__lowerCAmelCase: Union[str, Any] = torch.load(os.path.join(__snake_case , __snake_case ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__snake_case , __snake_case ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
__lowerCAmelCase: str = AutoModelForCausalLM.from_pretrained(__snake_case )
model.push_to_hub(__snake_case , max_shard_size='2GB' )
tokenizer.push_to_hub(__snake_case )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
_a = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
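# Standalone illustration of the key-renaming rules in `convert_state_dict` above:
_name = "blocks.3.att.time_mix_k"
_name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", _name)
_name = _name.replace(".time_mix_k", ".time_mix_key")
assert "rwkv." + _name == "rwkv.blocks.3.attention.time_mix_key"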
| 322 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
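# `_LazyModule` defers the heavy torch/tensorflow imports until an attribute is
# first accessed, so importing the package stays cheap. A minimal sketch of the idea
# (illustrative, not the actual transformers implementation):
#
# import importlib, types
#
# class LazyModule(types.ModuleType):
#     def __init__(self, name, import_structure):
#         super().__init__(name)
#         # map each exported symbol to the submodule that defines it
#         self._symbol_to_module = {
#             symbol: submodule
#             for submodule, symbols in import_structure.items()
#             for symbol in symbols
#         }
#     def __getattr__(self, attr):
#         submodule = importlib.import_module(
#             "." + self._symbol_to_module[attr], self.__name__
#         )
#         return getattr(submodule, attr)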
| 5 | 0 |
import argparse
import struct
import unittest
class SHAaaa:
"""simple docstring"""
def __init__( self , _lowercase ) -> None:
        self.data = data
# Initialize hash values
        self.hashes = [
0X6a_09_e6_67,
0Xbb_67_ae_85,
0X3c_6e_f3_72,
0Xa5_4f_f5_3a,
0X51_0e_52_7f,
0X9b_05_68_8c,
0X1f_83_d9_ab,
0X5b_e0_cd_19,
]
# Initialize round constants
        self.round_constants = [
0X42_8a_2f_98,
0X71_37_44_91,
0Xb5_c0_fb_cf,
0Xe9_b5_db_a5,
0X39_56_c2_5b,
0X59_f1_11_f1,
0X92_3f_82_a4,
0Xab_1c_5e_d5,
0Xd8_07_aa_98,
0X12_83_5b_01,
0X24_31_85_be,
0X55_0c_7d_c3,
0X72_be_5d_74,
0X80_de_b1_fe,
0X9b_dc_06_a7,
0Xc1_9b_f1_74,
0Xe4_9b_69_c1,
0Xef_be_47_86,
0X0f_c1_9d_c6,
0X24_0c_a1_cc,
0X2d_e9_2c_6f,
0X4a_74_84_aa,
0X5c_b0_a9_dc,
0X76_f9_88_da,
0X98_3e_51_52,
0Xa8_31_c6_6d,
0Xb0_03_27_c8,
0Xbf_59_7f_c7,
0Xc6_e0_0b_f3,
0Xd5_a7_91_47,
0X06_ca_63_51,
0X14_29_29_67,
0X27_b7_0a_85,
0X2e_1b_21_38,
0X4d_2c_6d_fc,
0X53_38_0d_13,
0X65_0a_73_54,
0X76_6a_0a_bb,
0X81_c2_c9_2e,
0X92_72_2c_85,
0Xa2_bf_e8_a1,
0Xa8_1a_66_4b,
0Xc2_4b_8b_70,
0Xc7_6c_51_a3,
0Xd1_92_e8_19,
0Xd6_99_06_24,
0Xf4_0e_35_85,
0X10_6a_a0_70,
0X19_a4_c1_16,
0X1e_37_6c_08,
0X27_48_77_4c,
0X34_b0_bc_b5,
0X39_1c_0c_b3,
0X4e_d8_aa_4a,
0X5b_9c_ca_4f,
0X68_2e_6f_f3,
0X74_8f_82_ee,
0X78_a5_63_6f,
0X84_c8_78_14,
0X8c_c7_02_08,
0X90_be_ff_fa,
0Xa4_50_6c_eb,
0Xbe_f9_a3_f7,
0Xc6_71_78_f2,
]
        self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
@staticmethod
    def preprocessing( data ) -> bytes:
        padding = B"""\x80""" + (B"""\x00""" * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack(""">Q""" , (len(data ) * 8) )
        return data + padding + big_endian_integer
    def final_hash( self ) -> None:
# Convert into blocks of 64 bytes
        self.blocks = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
a_ : int = list(struct.unpack(""">16L""" , _lowercase ) )
# add 48 0-ed integers
words += [0] * 48
a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ : Optional[int] = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
a_ : Tuple = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
a_ : Union[str, Any] = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
a_ : Tuple = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X1_00_00_00_00
# Compression
a_ : int = self.ror(_lowercase , 6 ) ^ self.ror(_lowercase , 11 ) ^ self.ror(_lowercase , 25 )
a_ : Dict = (e & f) ^ ((~e & 0Xff_ff_ff_ff) & g)
a_ : int = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X1_00_00_00_00
a_ : List[str] = self.ror(_lowercase , 2 ) ^ self.ror(_lowercase , 13 ) ^ self.ror(_lowercase , 22 )
a_ : Dict = (a & b) ^ (a & c) ^ (b & c)
a_ : List[str] = (sa + maj) % 0X1_00_00_00_00
a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ : Any = (
g,
f,
e,
((d + tempa) % 0X1_00_00_00_00),
c,
b,
a,
((tempa + tempa) % 0X1_00_00_00_00),
)
a_ : List[str] = [a, b, c, d, e, f, g, h]
# Modify final values
a_ : str = [
((element + mutated_hash_values[index]) % 0X1_00_00_00_00)
for index, element in enumerate(self.hashes )
]
a_ : Any = """""".join([hex(_lowercase )[2:].zfill(8 ) for value in self.hashes] )
    def ror( self , value: int , rotations: int ) -> int:
        return 0Xff_ff_ff_ff & (value << (32 - rotations)) | (value >> rotations)
class A__(unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> None:
import hashlib
        data = bytes("""Test String""" , """utf-8""" )
        self.assertEqual(SHAaaa(data ).hash , hashlib.sha256(data ).hexdigest() )
def _UpperCAmelCase ( ):
'''simple docstring'''
import doctest
doctest.testmod()
a_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"""-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument(
"""-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""")
a_ : List[str] = parser.parse_args()
a_ : str = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , """rb""") as f:
a_ : str = f.read()
else:
a_ : List[Any] = bytes(__snake_case , """utf-8""")
print(SHAaaa(__snake_case).hash)
if __name__ == "__main__":
main()
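# Sanity check for the 32-bit right-rotation used by the compression loop above:
def _ror(value: int, rotations: int) -> int:
    return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)

assert _ror(0x00000001, 1) == 0x80000000
assert _ror(0x12345678, 8) == 0x78123456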
| 248 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ = {
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ['''EfficientNetImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
'''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientNetForImageClassification''',
'''EfficientNetModel''',
'''EfficientNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 5 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A : List[str] = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[int] = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 184 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 | 0 |
import math
def proth( number: int ) -> int:
    """simple docstring"""
    if not isinstance(number , int ):
        msg = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if number < 1:
        msg = F"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg )
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3 , 2 ) ) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1 , block_index ):
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
try:
            value = proth(number)
except ValueError:
print(F"ValueError: there is no {number}th Proth number")
continue
print(F"The {number}th Proth number: {value}")
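# The sequence produced above matches the first Proth numbers
# (k * 2^n + 1 with odd k < 2^n): 3, 5, 9, 13, 17, 25, ...
assert [proth(n) for n in range(1, 7)] == [3, 5, 9, 13, 17, 25]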
| 313 |
def actual_power( a: int , b: int ) -> int:
    """simple docstring"""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
    else:
        return a * actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
def power( a: int , b: int ) -> float:
    """simple docstring"""
    if b < 0:
        return 1 / actual_power(a , b )
    return actual_power(a , b )
if __name__ == "__main__":
print(power(-2, -3))
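# Divide-and-conquer exponentiation: b is halved at every level, so the recursion
# depth is O(log b); negative exponents are handled by inversion, e.g.
assert power(2, 10) == 1024
assert power(-2, -3) == -0.125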
| 5 | 0 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def A (__A : Union[str, Any] , __A : str ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = torch.load(__snake_case , map_location='''cpu''' )
UpperCAmelCase_ = chkpt['''model''']
# We have the base model one level deeper than the original XLM repository
UpperCAmelCase_ = {}
for k, v in state_dict.items():
if "pred_layer" in k:
UpperCAmelCase_ = v
else:
UpperCAmelCase_ = v
UpperCAmelCase_ = chkpt['''params''']
UpperCAmelCase_ = {n: v for n, v in config.items() if not isinstance(__snake_case , (torch.FloatTensor, numpy.ndarray) )}
UpperCAmelCase_ = chkpt['''dico_word2id''']
UpperCAmelCase_ = {s + '''</w>''' if s.find('''@@''' ) == -1 and i > 13 else s.replace('''@@''' , '''''' ): i for s, i in vocab.items()}
# Save pytorch-model
UpperCAmelCase_ = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCAmelCase_ = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCAmelCase_ = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file''']
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(__snake_case , __snake_case )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(__snake_case , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__snake_case , indent=2 ) + '''\n''' )
print(F"""Save vocab file to {pytorch_config_dump_path}""" )
with open(__snake_case , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__snake_case , indent=2 ) + '''\n''' )
if __name__ == "__main__":
snake_case_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
snake_case_ : Tuple = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
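    # The vocab rewrite above appends "</w>" to word-final tokens while stripping the
    # BPE continuation marker "@@" and leaving the first 14 special symbols untouched:
    _vocab = {"<s>": 0, "wor@@": 16, "hello": 17}
    _converted = {
        s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i
        for s, i in _vocab.items()
    }
    assert _converted == {"<s>": 0, "wor": 16, "hello</w>": 17}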
| 51 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowerCamelCase__ ( nn.Module):
def __init__(self , UpperCAmelCase = 1_6 , UpperCAmelCase = 8_8 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = 3_2 , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = "geglu" , UpperCAmelCase = None , ) -> Any:
super().__init__()
_lowercase =nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=UpperCAmelCase , attention_head_dim=UpperCAmelCase , in_channels=UpperCAmelCase , num_layers=UpperCAmelCase , dropout=UpperCAmelCase , norm_num_groups=UpperCAmelCase , cross_attention_dim=UpperCAmelCase , attention_bias=UpperCAmelCase , sample_size=UpperCAmelCase , num_vector_embeds=UpperCAmelCase , activation_fn=UpperCAmelCase , num_embeds_ada_norm=UpperCAmelCase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_lowercase =0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_lowercase =[7_7, 2_5_7]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_lowercase =[1, 0]
def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase = True , ) -> str:
_lowercase =hidden_states
_lowercase =[]
_lowercase =0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_lowercase =encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_lowercase =self.transformer_index_for_condition[i]
_lowercase =self.transformers[transformer_index](
UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , timestep=UpperCAmelCase , cross_attention_kwargs=UpperCAmelCase , return_dict=UpperCAmelCase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_lowercase =encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_lowercase =output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=UpperCAmelCase )
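# The forward pass above runs each condition slice through its own TransformeraDModel
# copy, isolates the residual by subtracting the input, then blends:
#     output = mix_ratio * (enc_0 - x) + (1 - mix_ratio) * (enc_1 - x) + x
# The condition_lengths (77, 257) presumably correspond to CLIP text tokens and image
# patch embeddings respectively (that interpretation is an assumption, not stated here).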
| 5 | 0 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
def __init__( self : int ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Optional[Any]=1_3 ,SCREAMING_SNAKE_CASE__ : List[str]=3_0 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=2 ,SCREAMING_SNAKE_CASE__ : int=3 ,SCREAMING_SNAKE_CASE__ : List[Any]=True ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_2 ,SCREAMING_SNAKE_CASE__ : Optional[int]=5 ,SCREAMING_SNAKE_CASE__ : List[str]=4 ,SCREAMING_SNAKE_CASE__ : str=3_7 ,SCREAMING_SNAKE_CASE__ : int="gelu" ,SCREAMING_SNAKE_CASE__ : str=0.1 ,SCREAMING_SNAKE_CASE__ : List[str]=0.1 ,SCREAMING_SNAKE_CASE__ : List[str]=1_0 ,SCREAMING_SNAKE_CASE__ : List[Any]=0.02 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ,):
__lowerCamelCase : str = parent
__lowerCamelCase : Dict = batch_size
__lowerCamelCase : str = image_size
__lowerCamelCase : Optional[int] = patch_size
__lowerCamelCase : Tuple = num_channels
__lowerCamelCase : Dict = is_training
__lowerCamelCase : int = use_labels
__lowerCamelCase : List[str] = hidden_size
__lowerCamelCase : List[str] = num_hidden_layers
__lowerCamelCase : Optional[int] = num_attention_heads
__lowerCamelCase : str = intermediate_size
__lowerCamelCase : List[str] = hidden_act
__lowerCamelCase : Tuple = hidden_dropout_prob
__lowerCamelCase : Any = attention_probs_dropout_prob
__lowerCamelCase : List[Any] = type_sequence_label_size
__lowerCamelCase : List[Any] = initializer_range
__lowerCamelCase : Optional[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowerCamelCase : Any = (image_size // patch_size) ** 2
__lowerCamelCase : str = num_patches + 1
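        # e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, so seq_length = 226 including the [CLS] token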
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__lowerCamelCase : Optional[Any] = None
if self.use_labels:
__lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size)
__lowerCamelCase : Dict = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self : Optional[int]):
return ViTMSNConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,)
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Any):
__lowerCamelCase : int = ViTMSNModel(config=SCREAMING_SNAKE_CASE__)
model.to(SCREAMING_SNAKE_CASE__)
model.eval()
__lowerCamelCase : Any = model(SCREAMING_SNAKE_CASE__)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size))
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : str):
__lowerCamelCase : List[Any] = self.type_sequence_label_size
__lowerCamelCase : List[Any] = ViTMSNForImageClassification(SCREAMING_SNAKE_CASE__)
model.to(SCREAMING_SNAKE_CASE__)
model.eval()
__lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__)
        print(f'Pixel and labels shape: {pixel_values.shape}, {labels.shape}')
        print(f'Labels: {labels}')
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size))
# test greyscale images
__lowerCamelCase : int = 1
__lowerCamelCase : Tuple = ViTMSNForImageClassification(SCREAMING_SNAKE_CASE__)
model.to(SCREAMING_SNAKE_CASE__)
model.eval()
__lowerCamelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size))
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Optional[int] = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = config_and_inputs
__lowerCamelCase : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase : Optional[int] = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
_UpperCAmelCase : int = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : Any = False
def lowerCAmelCase ( self : Tuple):
__lowerCamelCase : Union[str, Any] = ViTMSNModelTester(self)
__lowerCamelCase : int = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,has_text_modality=SCREAMING_SNAKE_CASE__ ,hidden_size=3_7)
def lowerCAmelCase ( self : Tuple):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds')
def lowerCAmelCase ( self : List[str]):
pass
def lowerCAmelCase ( self : Dict):
__lowerCamelCase , __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Optional[int] = model_class(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module))
__lowerCamelCase : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ ,nn.Linear))
def lowerCAmelCase ( self : int):
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : List[str] = model_class(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : List[str] = [*signature.parameters.keys()]
__lowerCamelCase : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : str):
__lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__)
@slow
def lowerCAmelCase ( self : List[Any]):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : str = ViTMSNModel.from_pretrained(SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
__lowerCamelCase : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase ( self : Optional[Any]):
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small') if is_vision_available() else None
@slow
def lowerCAmelCase ( self : Optional[Any]):
torch.manual_seed(2)
__lowerCamelCase : Tuple = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small').to(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = self.default_image_processor
__lowerCamelCase : int = prepare_img()
__lowerCamelCase : Optional[int] = image_processor(images=SCREAMING_SNAKE_CASE__ ,return_tensors='pt').to(SCREAMING_SNAKE_CASE__)
# forward pass
with torch.no_grad():
__lowerCamelCase : List[str] = model(**SCREAMING_SNAKE_CASE__)
# verify the logits
__lowerCamelCase : Dict = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = torch.tensor([-0.0803, -0.4454, -0.2375]).to(SCREAMING_SNAKE_CASE__)
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
| 73 |
import heapq as hq
import math
from collections.abc import Iterator
class lowerCamelCase__ :
def __init__(self , UpperCAmelCase ) -> Any:
_lowercase =str(id_ )
_lowercase =None
_lowercase =None
_lowercase =[]
_lowercase ={} # {vertex:distance}
def __lt__(self , UpperCAmelCase ) -> List[str]:
return self.key < other.key
def __repr__(self ) -> str:
return self.id
def __A (self , UpperCAmelCase ) -> Dict:
self.neighbors.append(UpperCAmelCase )
def __A (self , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
_lowercase =weight
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case ) -> List[str]:
"""simple docstring"""
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , __snake_case )
graph[b - 1].add_edge(graph[a - 1] , __snake_case )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> list:
"""simple docstring"""
_lowercase =[]
for u in graph:
_lowercase =math.inf
_lowercase =None
_lowercase =0
_lowercase =graph[:]
while q:
_lowercase =min(__snake_case )
q.remove(__snake_case )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
_lowercase =u
_lowercase =u.edges[v.id]
for i in range(1 , len(__snake_case ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> Iterator[tuple]:
"""simple docstring"""
for u in graph:
_lowercase =math.inf
_lowercase =None
_lowercase =0
_lowercase =list(__snake_case )
hq.heapify(__snake_case )
while h:
_lowercase =hq.heappop(__snake_case )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
_lowercase =u
_lowercase =u.edges[v.id]
hq.heapify(__snake_case )
for i in range(1 , len(__snake_case ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def UpperCAmelCase_ ( ) -> None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
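    # Usage sketch (hedged): under the unmangled upstream names this file implements
    # Prim's minimum spanning tree; the identifiers below are assumptions, not the
    # mangled names used above.
    #   graph = [Vertex(i) for i in range(1, 5)]
    #   connect(graph, 1, 2, 3); connect(graph, 2, 3, 4); connect(graph, 1, 3, 5)
    #   print(prim(graph, graph[0]))             # O(V^2) variant: list of (child, parent) MST edges
    #   print(list(prim_heap(graph, graph[0])))  # heap-based variant yields the same pairs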
| 5 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_A : Union[str, Any] = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Dict = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Union[str, Any] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_A : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
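# Note (sketch, assuming the standard `_LazyModule` behaviour): importing this package stays
# cheap because the torch-backed symbols listed in `_import_structure` are only materialised
# on first attribute access, e.g.
#   from transformers.models.squeezebert import SqueezeBertConfig  # triggers the lazy load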
| 142 |
# flake8: noqa
# Lint as: python3
UpperCAmelCase__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 5 | 0 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] = ['''image_processor''', '''tokenizer''']
UpperCamelCase_ : Optional[int] = '''BlipImageProcessor'''
UpperCamelCase_ : Tuple = '''AutoTokenizer'''
def __init__( self : Dict , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[Any] ) -> Dict:
"""simple docstring"""
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
# add QFormer tokenizer
_UpperCAmelCase : Union[str, Any] = qformer_tokenizer
def __call__( self : Optional[Any] , lowerCAmelCase__ : Tuple = None , lowerCAmelCase__ : Union[str, Any] = None , lowerCAmelCase__ : Union[str, Any] = True , lowerCAmelCase__ : List[str] = False , lowerCAmelCase__ : str = None , lowerCAmelCase__ : List[str] = None , lowerCAmelCase__ : List[str] = 0 , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : str = None , lowerCAmelCase__ : Optional[int] = False , lowerCAmelCase__ : Tuple = False , lowerCAmelCase__ : Union[str, Any] = False , lowerCAmelCase__ : Tuple = False , lowerCAmelCase__ : Optional[int] = False , lowerCAmelCase__ : Dict = True , lowerCAmelCase__ : List[str] = None , **lowerCAmelCase__ : Dict , ) -> BatchFeature:
"""simple docstring"""
if images is None and text is None:
raise ValueError("You have to specify at least images or text." )
_UpperCAmelCase : int = BatchFeature()
if text is not None:
_UpperCAmelCase : Optional[Any] = self.tokenizer(
text=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , stride=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_overflowing_tokens=lowerCAmelCase__ , return_special_tokens_mask=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_length=lowerCAmelCase__ , verbose=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , )
encoding.update(lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = self.qformer_tokenizer(
text=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , stride=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_overflowing_tokens=lowerCAmelCase__ , return_special_tokens_mask=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_length=lowerCAmelCase__ , verbose=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , )
_UpperCAmelCase : int = qformer_text_encoding.pop("input_ids" )
_UpperCAmelCase : Union[str, Any] = qformer_text_encoding.pop("attention_mask" )
if images is not None:
_UpperCAmelCase : int = self.image_processor(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
encoding.update(lowerCAmelCase__ )
return encoding
def _lowerCAmelCase ( self : Any , *lowerCAmelCase__ : Optional[int] , **lowerCAmelCase__ : int ) -> Any:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def _lowerCAmelCase ( self : int , *lowerCAmelCase__ : Any , **lowerCAmelCase__ : Union[str, Any] ) -> int:
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _lowerCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Any = self.tokenizer.model_input_names
_UpperCAmelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase__ : Tuple , **lowerCAmelCase__ : str ) -> Optional[Any]:
"""simple docstring"""
if os.path.isfile(lowerCAmelCase__ ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = os.path.join(lowerCAmelCase__ , "qformer_tokenizer" )
self.qformer_tokenizer.save_pretrained(lowerCAmelCase__ )
return super().save_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
@classmethod
def _lowerCAmelCase ( cls : Optional[int] , lowerCAmelCase__ : Dict , **lowerCAmelCase__ : int ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : str = AutoTokenizer.from_pretrained(lowerCAmelCase__ , subfolder="qformer_tokenizer" )
_UpperCAmelCase : int = cls._get_arguments_from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
args.append(lowerCAmelCase__ )
        return cls(*lowerCAmelCase__ )
| 145 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowerCamelCase__ ( lowerCAmelCase):
SCREAMING_SNAKE_CASE__ = '''wavlm'''
def __init__(self , UpperCAmelCase=3_2 , UpperCAmelCase=7_6_8 , UpperCAmelCase=1_2 , UpperCAmelCase=1_2 , UpperCAmelCase=3_0_7_2 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase="group" , UpperCAmelCase="gelu" , UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase=(1_0, 3, 3, 3, 3, 2, 2) , UpperCAmelCase=False , UpperCAmelCase=1_2_8 , UpperCAmelCase=1_6 , UpperCAmelCase=3_2_0 , UpperCAmelCase=8_0_0 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.05 , UpperCAmelCase=1_0 , UpperCAmelCase=2 , UpperCAmelCase=0.0 , UpperCAmelCase=1_0 , UpperCAmelCase=3_2_0 , UpperCAmelCase=2 , UpperCAmelCase=0.1 , UpperCAmelCase=1_0_0 , UpperCAmelCase=2_5_6 , UpperCAmelCase=2_5_6 , UpperCAmelCase=0.1 , UpperCAmelCase="mean" , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=2_5_6 , UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCAmelCase=(5, 3, 3, 1, 1) , UpperCAmelCase=(1, 2, 3, 1, 1) , UpperCAmelCase=5_1_2 , UpperCAmelCase=8_0 , UpperCAmelCase=0 , UpperCAmelCase=1 , UpperCAmelCase=2 , UpperCAmelCase=False , UpperCAmelCase=3 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=None , **UpperCAmelCase , ) -> Optional[Any]:
super().__init__(**UpperCAmelCase , pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase )
_lowercase =hidden_size
_lowercase =feat_extract_norm
_lowercase =feat_extract_activation
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =conv_bias
_lowercase =num_buckets
_lowercase =max_bucket_distance
_lowercase =num_conv_pos_embeddings
_lowercase =num_conv_pos_embedding_groups
_lowercase =len(self.conv_dim )
_lowercase =num_hidden_layers
_lowercase =intermediate_size
_lowercase =hidden_act
_lowercase =num_attention_heads
_lowercase =hidden_dropout
_lowercase =attention_dropout
_lowercase =activation_dropout
_lowercase =feat_proj_dropout
_lowercase =final_dropout
_lowercase =layerdrop
_lowercase =layer_norm_eps
_lowercase =initializer_range
_lowercase =num_ctc_classes
_lowercase =vocab_size
_lowercase =do_stable_layer_norm
_lowercase =use_weighted_layer_sum
_lowercase =classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowercase =apply_spec_augment
_lowercase =mask_time_prob
_lowercase =mask_time_length
_lowercase =mask_time_min_masks
_lowercase =mask_feature_prob
_lowercase =mask_feature_length
# parameters for pretraining with codevector quantized representations
_lowercase =num_codevectors_per_group
_lowercase =num_codevector_groups
_lowercase =contrastive_logits_temperature
_lowercase =num_negatives
_lowercase =codevector_dim
_lowercase =proj_codevector_dim
_lowercase =diversity_loss_weight
# ctc loss
_lowercase =ctc_loss_reduction
_lowercase =ctc_zero_infinity
# adapter
_lowercase =add_adapter
_lowercase =adapter_kernel_size
_lowercase =adapter_stride
_lowercase =num_adapter_layers
_lowercase =output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowercase =classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =xvector_output_dim
@property
def __A (self ) -> int:
return functools.reduce(operator.mul , self.conv_stride , 1 )
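if __name__ == "__main__":
    # Worked example (sketch, reusing the module-level functools/operator imports): with the
    # default conv_stride (5, 2, 2, 2, 2, 2, 2) the property above multiplies out to
    # 5 * 2**6 = 320, i.e. one output frame for every 320 raw audio samples.
    assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320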
| 5 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : Dict = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 120 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase):
def __A (self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __A (self ) -> Optional[Any]:
_lowercase =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
_lowercase =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
_lowercase ='''xvjiarui/stable-diffusion-2-inpainting'''
_lowercase , _lowercase =FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCAmelCase , safety_checker=UpperCAmelCase )
_lowercase ='''Face of a yellow cat, high resolution, sitting on a park bench'''
_lowercase =jax.random.PRNGKey(0 )
_lowercase =5_0
_lowercase =jax.device_count()
_lowercase =num_samples * [prompt]
_lowercase =num_samples * [init_image]
_lowercase =num_samples * [mask_image]
_lowercase , _lowercase , _lowercase =pipeline.prepare_inputs(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# shard inputs and rng
_lowercase =replicate(UpperCAmelCase )
_lowercase =jax.random.split(UpperCAmelCase , jax.device_count() )
_lowercase =shard(UpperCAmelCase )
_lowercase =shard(UpperCAmelCase )
_lowercase =shard(UpperCAmelCase )
_lowercase =pipeline(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase )
_lowercase =output.images.reshape(UpperCAmelCase , 5_1_2 , 5_1_2 , 3 )
_lowercase =images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
_lowercase =jnp.asarray(jax.device_get(image_slice.flatten() ) )
_lowercase =jnp.array(
[0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
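        # Sketch of what `shard`/`replicate` do above (assumption: the standard flax helpers):
        # `replicate` copies pytree leaves onto every local device, while `shard` reshapes a
        # leading batch dim of size device_count * b into (device_count, b, ...) so each
        # device gets its own slice under pmap, e.g.
        #   x = jnp.zeros((jax.device_count() * 2, 4))
        #   shard(x).shape  # -> (jax.device_count(), 2, 4)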
| 5 | 0 |
import copy
import re
class __snake_case :
'''simple docstring'''
lowerCAmelCase__ = """hp"""
lowerCAmelCase__ = {}
lowerCAmelCase__ = None
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , A : List[Any] , A : Tuple ):
__snake_case: str = prefix
__snake_case: List[Any] = defaults
cls.build_naming_info()
@staticmethod
def UpperCAmelCase__ ( A : Union[str, Any] , A : List[Any] ):
if len(A ) == 0:
return ""
__snake_case: Dict = None
if any(char.isdigit() for char in word ):
raise Exception(f'''Parameters should not contain numbers: \'{word}\' contains a number''' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(A ) + 1 ):
__snake_case: Optional[Any] = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
__snake_case: Union[str, Any] = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(A : int ):
__snake_case: str = """"""
while integer != 0:
__snake_case: List[Any] = chr(ord("""A""" ) + integer % 10 ) + s
integer //= 10
return s
__snake_case: Tuple = 0
while True:
__snake_case: str = word + """#""" + int_to_alphabetic(A )
if sword in info["reverse_short_word"]:
continue
else:
__snake_case: Tuple = sword
break
__snake_case: Any = short_word
__snake_case: str = word
return short_word
@staticmethod
def UpperCAmelCase__ ( A : str , A : Any ):
__snake_case: Optional[int] = param_name.split("""_""" )
__snake_case: Tuple = [TrialShortNamer.shortname_for_word(A , A ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
__snake_case: Optional[int] = ["""""", """_"""]
for separator in separators:
__snake_case: Union[str, Any] = separator.join(A )
if shortname not in info["reverse_short_param"]:
__snake_case: Optional[Any] = shortname
__snake_case: Tuple = param_name
return shortname
return param_name
@staticmethod
def UpperCAmelCase__ ( A : Any , A : List[str] ):
__snake_case: Dict = TrialShortNamer.shortname_for_key(A , A )
__snake_case: List[Any] = short_name
__snake_case: Union[str, Any] = param_name
@classmethod
def UpperCAmelCase__ ( cls : Any ):
if cls.NAMING_INFO is not None:
return
__snake_case: Optional[Any] = {
"""short_word""": {},
"""reverse_short_word""": {},
"""short_param""": {},
"""reverse_short_param""": {},
}
__snake_case: List[Any] = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(A , A )
__snake_case: Dict = info
@classmethod
def UpperCAmelCase__ ( cls : List[Any] , A : List[str] ):
cls.build_naming_info()
assert cls.PREFIX is not None
__snake_case: Dict = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'''You should provide a default value for the param name {k} with value {v}''' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
__snake_case: Tuple = cls.NAMING_INFO["""short_param"""][k]
if isinstance(A , A ):
__snake_case: Tuple = 1 if v else 0
__snake_case: Any = """""" if isinstance(A , (int, float) ) else """-"""
__snake_case: Dict = f'''{key}{sep}{v}'''
name.append(A )
return "_".join(A )
@classmethod
def UpperCAmelCase__ ( cls : int , A : Optional[Any] ):
__snake_case: Any = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
__snake_case: Tuple = []
else:
__snake_case: str = repr.split("""_""" )
__snake_case: Union[str, Any] = {}
for value in values:
if "-" in value:
__snake_case , __snake_case: Union[str, Any] = value.split("""-""" )
else:
__snake_case: Union[str, Any] = re.sub("""[0-9.]""" , """""" , A )
__snake_case: Tuple = float(re.sub("""[^0-9.]""" , """""" , A ) )
__snake_case: Any = cls.NAMING_INFO["""reverse_short_param"""][p_k]
__snake_case: Optional[Any] = p_v
for k in cls.DEFAULTS:
if k not in parameters:
__snake_case: List[str] = cls.DEFAULTS[k]
return parameters
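# Usage sketch (hedged — the identifiers below assume the unmangled upstream API,
# transformers' TrialShortNamer, whose public classmethods are set_defaults, shortname
# and parse_repr):
#   TrialShortNamer.set_defaults("run", {"learning_rate": 1e-3, "batch_size": 8})
#   TrialShortNamer.shortname({"learning_rate": 1e-4, "batch_size": 8})
#   # -> "run_lr0.0001"   (batch_size is dropped because it equals its default)
#   TrialShortNamer.parse_repr("run_lr0.0001")
#   # -> {"learning_rate": 0.0001, "batch_size": 8}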
| 111 |
import comet # From: unbabel-comet
import torch
import datasets
UpperCAmelCase__ = datasets.logging.get_logger(__name__)
UpperCAmelCase__ = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
UpperCAmelCase__ = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
UpperCAmelCase__ = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCamelCase__ ( datasets.Metric):
def __A (self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence''' ),
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
                '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf''',
] , )
def __A (self , UpperCAmelCase ) -> Dict:
if self.config_name == "default":
_lowercase =comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) )
else:
_lowercase =comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=False ) -> int:
if gpus is None:
_lowercase =1 if torch.cuda.is_available() else 0
_lowercase ={'''src''': sources, '''mt''': predictions, '''ref''': references}
_lowercase =[dict(zip(UpperCAmelCase , UpperCAmelCase ) ) for t in zip(*data.values() )]
_lowercase , _lowercase =self.scorer.predict(UpperCAmelCase , gpus=UpperCAmelCase , progress_bar=UpperCAmelCase )
return {"mean_score": mean_score, "scores": scores}
| 5 | 0 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __lowerCAmelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=9_9 , lowerCAmelCase__=3_2 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=3_7 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=1_6 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ) -> Optional[int]:
'''simple docstring'''
a__ : Any =parent
a__ : List[str] =batch_size
a__ : Dict =seq_length
a__ : Optional[int] =is_training
a__ : List[str] =use_input_mask
a__ : List[Any] =use_token_type_ids
a__ : List[str] =use_labels
a__ : Optional[Any] =vocab_size
a__ : Union[str, Any] =hidden_size
a__ : List[Any] =num_hidden_layers
a__ : Optional[Any] =num_attention_heads
a__ : List[str] =intermediate_size
a__ : Any =hidden_act
a__ : Tuple =hidden_dropout_prob
a__ : Optional[Any] =attention_probs_dropout_prob
a__ : Optional[Any] =max_position_embeddings
a__ : List[str] =type_vocab_size
a__ : Union[str, Any] =type_sequence_label_size
a__ : List[Any] =initializer_range
a__ : Tuple =num_labels
a__ : Any =num_choices
a__ : Optional[int] =scope
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : Dict =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a__ : Tuple =None
if self.use_input_mask:
a__ : Any =random_attention_mask([self.batch_size, self.seq_length] )
a__ : Union[str, Any] =None
if self.use_token_type_ids:
a__ : Dict =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a__ : Union[str, Any] =None
a__ : List[Any] =None
a__ : Any =None
if self.use_labels:
a__ : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ : Dict =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a__ : List[str] =ids_tensor([self.batch_size] , self.num_choices )
a__ : List[Any] =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
a__ : str =LlamaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a__ : Union[str, Any] =model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
a__ : Optional[Any] =model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> Any:
'''simple docstring'''
a__ : str =True
a__ : Dict =LlamaModel(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a__ : List[Any] =model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , )
a__ : Optional[Any] =model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , )
a__ : Dict =model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> Union[str, Any]:
'''simple docstring'''
a__ : Dict =LlamaForCausalLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a__ : Dict =model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> List[Any]:
'''simple docstring'''
a__ : List[Any] =True
a__ : Optional[Any] =True
a__ : List[Any] =LlamaForCausalLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
# first forward pass
a__ : int =model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__ , )
a__ : List[Any] =outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a__ : List[str] =ids_tensor((self.batch_size, 3) , config.vocab_size )
a__ : List[str] =ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
a__ : Optional[int] =torch.cat([input_ids, next_tokens] , dim=-1 )
a__ : int =torch.cat([input_mask, next_mask] , dim=-1 )
a__ : int =model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , )["hidden_states"][0]
a__ : Union[str, Any] =model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , )["hidden_states"][0]
# select random slice
a__ : Optional[Any] =ids_tensor((1,) , output_from_past.shape[-1] ).item()
a__ : List[str] =output_from_no_past[:, -3:, random_slice_idx].detach()
a__ : Any =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : Optional[Any] =self.prepare_config_and_inputs()
        a__ , a__ , a__ , a__ , a__ , a__ , a__ : List[Any] =config_and_inputs
a__ : str ={"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase):
_lowercase : Dict = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_lowercase : Union[str, Any] = (LlamaForCausalLM,) if is_torch_available() else ()
_lowercase : List[Any] = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowercase : List[Any] = False
_lowercase : Optional[Any] = False
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : Optional[int] =LlamaModelTester(self )
a__ : Dict =ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=3_7 )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
a__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
a__ : Dict =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a__ : Dict =type
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
a__ , a__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
a__ : List[str] =3
a__ : Dict =input_dict["input_ids"]
a__ : str =input_ids.ne(1 ).to(lowerCAmelCase__ )
a__ : int =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
a__ : Union[str, Any] =LlamaForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a__ : List[Any] =model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ , a__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
a__ : List[Any] =3
a__ : List[str] ="single_label_classification"
a__ : int =input_dict["input_ids"]
a__ : Optional[Any] =input_ids.ne(1 ).to(lowerCAmelCase__ )
a__ : Dict =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
a__ : List[str] =LlamaForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a__ : Any =model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
a__ , a__ : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
a__ : Tuple =3
a__ : List[str] ="multi_label_classification"
a__ : Optional[int] =input_dict["input_ids"]
a__ : Dict =input_ids.ne(1 ).to(lowerCAmelCase__ )
a__ : List[Any] =ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
a__ : List[str] =LlamaForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a__ : int =model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def _lowercase ( self , lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
a__ , a__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
a__ : Tuple =ids_tensor([1, 1_0] , config.vocab_size )
a__ : Optional[int] =ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
a__ : List[Any] =LlamaModel(lowerCAmelCase__ )
original_model.to(lowerCAmelCase__ )
original_model.eval()
a__ : Union[str, Any] =original_model(lowerCAmelCase__ ).last_hidden_state
a__ : Dict =original_model(lowerCAmelCase__ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
a__ : List[str] ={"type": scaling_type, "factor": 10.0}
a__ : Optional[Any] =LlamaModel(lowerCAmelCase__ )
scaled_model.to(lowerCAmelCase__ )
scaled_model.eval()
a__ : List[str] =scaled_model(lowerCAmelCase__ ).last_hidden_state
a__ : List[str] =scaled_model(lowerCAmelCase__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-5 ) )
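        # Note (sketch, assuming transformers' LlamaConfig rope_scaling schema): the dict
        # exercised above is {"type": "linear" | "dynamic", "factor": float > 1}; "linear"
        # rescales positions at every length, while "dynamic" only changes the embeddings
        # once the input exceeds the original max_position_embeddings.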
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : int =[1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
a__ : Dict =LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" )
a__ : Optional[Any] =model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
a__ : Any =torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] )
torch.testing.assert_close(out.mean(-1 ) , lowerCAmelCase__ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
a__ : int =torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , lowerCAmelCase__ , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
a__ : Tuple =[1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
a__ : int =LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" )
a__ : List[Any] =model(torch.tensor(lowerCAmelCase__ ) )
# Expected mean on dim = -1
a__ : Any =torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] )
torch.testing.assert_close(out.mean(-1 ) , lowerCAmelCase__ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
a__ : Optional[int] =torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , lowerCAmelCase__ , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def _lowercase ( self ) -> int:
'''simple docstring'''
a__ : int =[1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
a__ : Optional[int] =LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" )
a__ : Union[str, Any] =model(torch.tensor(lowerCAmelCase__ ) )
# Expected mean on dim = -1
a__ : Tuple =torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] )
torch.testing.assert_close(out.mean(-1 ) , lowerCAmelCase__ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
a__ : List[str] =torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :3_0] , lowerCAmelCase__ , atol=1E-5 , rtol=1E-5 )
@unittest.skip(
"Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test" )
@slow
def _lowercase ( self ) -> Dict:
'''simple docstring'''
a__ : Tuple =[1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
a__ : List[Any] =LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" )
a__ : List[Any] =model(torch.tensor(lowerCAmelCase__ ) )
a__ : Tuple =torch.tensor(
[[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , lowerCAmelCase__ , atol=1E-2 , rtol=1E-2 )
# fmt: off
a__ : Union[str, Any] =torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , lowerCAmelCase__ , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Model is curently gated" )
@slow
def _lowercase ( self ) -> int:
'''simple docstring'''
a__ : List[Any] ="Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
a__ : str ="Simply put, the theory of relativity states that "
a__ : Tuple =LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" )
a__ : Optional[Any] =tokenizer.encode(lowerCAmelCase__ , return_tensors="pt" )
a__ : Union[str, Any] =LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=lowerCAmelCase__ )
# greedy generation outputs
a__ : Union[str, Any] =model.generate(lowerCAmelCase__ , max_new_tokens=6_4 , top_p=lowerCAmelCase__ , temperature=1 , do_sample=lowerCAmelCase__ )
a__ : int =tokenizer.decode(generated_ids[0] , skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
| 95 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowerCamelCase__ ( lowerCAmelCase , lowerCAmelCase):
SCREAMING_SNAKE_CASE__ = 1
@register_to_config
def __init__(self , UpperCAmelCase=2_0_0_0 , UpperCAmelCase=0.1 , UpperCAmelCase=2_0 , UpperCAmelCase=1e-3 ) -> List[str]:
_lowercase =None
_lowercase =None
_lowercase =None
def __A (self , UpperCAmelCase , UpperCAmelCase = None ) -> str:
_lowercase =torch.linspace(1 , self.config.sampling_eps , UpperCAmelCase , device=UpperCAmelCase )
def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None ) -> Optional[int]:
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
_lowercase =(
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
_lowercase =torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
_lowercase =std.flatten()
while len(std.shape ) < len(score.shape ):
_lowercase =std.unsqueeze(-1 )
_lowercase =-score / std
# compute
_lowercase =-1.0 / len(self.timesteps )
_lowercase =self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
_lowercase =beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
_lowercase =beta_t.unsqueeze(-1 )
_lowercase =-0.5 * beta_t * x
_lowercase =torch.sqrt(UpperCAmelCase )
_lowercase =drift - diffusion**2 * score
_lowercase =x + drift * dt
# add noise
_lowercase =randn_tensor(x.shape , layout=x.layout , generator=UpperCAmelCase , device=x.device , dtype=x.dtype )
_lowercase =x_mean + diffusion * math.sqrt(-dt ) * noise
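        # Recap (sketch, assuming the reverse-time VP-SDE of Song et al., 2021): with
        # f(x, t) = -0.5 * beta(t) * x and g(t) = sqrt(beta(t)), the lines above compute
        #   x_mean = x + (f(x, t) - g(t)**2 * score) * dt
        #   x      = x_mean + g(t) * sqrt(-dt) * z,   z ~ N(0, I)
        # (dt is negative here, hence sqrt(-dt) for the noise scale).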
return x, x_mean
def __len__(self ) -> str:
return self.config.num_train_timesteps
| 5 | 0 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A_ ( snake_case__ ):
_lowercase : List[Any] = 4_2
class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class VectorQuantizer(nn.Module):
    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
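
# Usage sketch (added, illustrative): quantizing a random latent against a small
# codebook; `forward` expects a (batch, channel, height, width) input.
#
#     vq = VectorQuantizer(n_e=16, vq_embed_dim=4, beta=0.25)
#     z_q, loss, (_, _, indices) = vq(torch.randn(1, 4, 8, 8))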
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
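
# Usage sketch (added, illustrative): the first half of `parameters` along dim 1
# is interpreted as the mean, the second half as the log-variance.
#
#     dist = DiagonalGaussianDistribution(torch.randn(1, 8, 4, 4))
#     latent = dist.sample()  # shape (1, 4, 4, 4), reparameterized draw
#     kl_to_standard_normal = dist.kl()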
| 322 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights to our MobileViT structure.
    """
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(checkpoint, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 5 | 0 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 248 |
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """
    Return a dictionary of current worldwide COVID-19 statistics scraped from Worldometers.
    """
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
| 5 | 0 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()

        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
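
# Usage sketch (added, illustrative): watermarking a batch of images in [-1, 1].
#
#     watermarker = StableDiffusionXLWatermarker()
#     images = torch.zeros(1, 3, 512, 512)
#     watermarked = watermarker.apply_watermark(images)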
| 184 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 5 | 0 |
def count_divisors(n):
    """Count the divisors of n from its prime factorization."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution():
    """Return the first triangle number with more than 500 divisors."""
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
| 313 |
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm, which finds the longest palindromic substring in linear time.

    >>> palindromic_string("abbbaba")
    'abbba'
    >>> palindromic_string("ababa")
    'ababa'
    """
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
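    # quick demonstration (added): "abababa" is itself the longest palindrome here
    print(palindromic_string("abababa"))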
| 5 | 0 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
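
# Example (added for illustration): dep_version_check("tqdm") raises a requirement
# error if the installed tqdm does not satisfy the version pin recorded in `deps`.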
| 51 |
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return the prime numbers below max_number via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """
    Return the number of composite integers below max_number that have precisely
    two, not necessarily distinct, prime factors.
    """
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
| 5 | 0 |
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
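
# Usage sketch (added, illustrative):
#
#     config = BertAbsConfig(dec_layers=4)
#     assert config.dec_layers == 4 and config.model_type == "bertabs"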
| 73 |
# fmt: off
MORSE_CODE_DICT = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """Encrypt a plain-text message into Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Decrypt a Morse-code message back into plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
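    # round-trip sanity check (added for illustration): decrypt inverts encrypt
    assert decrypt(encrypt("SOS")) == "SOS"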
if __name__ == "__main__":
main()
| 5 | 0 |
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """
    Calculate any one of voltage, current, or power, given the other two;
    exactly one argument must be 0.
    """
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
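    # worked example (added): 2 V at 4.5 A dissipates 9 W
    print(electric_power(voltage=2, current=4.5, power=0))  # result(name='power', value=9.0)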
| 142 |
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """
    Viterbi algorithm: computes the most likely sequence of hidden states for the
    given sequence of observations in a hidden Markov model.
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()

    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
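    # Illustrative run (added): the classic Healthy/Fever hidden Markov model.
    observations = ["normal", "cold", "dizzy"]
    states = ["Healthy", "Fever"]
    start_p = {"Healthy": 0.6, "Fever": 0.4}
    trans_p = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    emit_p = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(observations, states, start_p, trans_p, emit_p))  # ['Healthy', 'Healthy', 'Fever']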
| 5 | 0 |
"""
 Sequence feature extraction class for common feature extractors to preprocess sequences.
"""
from typing import Dict, List, Optional, Union

import numpy as np

from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy


logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
| 145 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
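
# Usage sketch (added, illustrative): building a folding-model config and
# round-tripping it through to_dict(); the vocab_size value here is arbitrary.
#
#     config = EsmConfig(vocab_size=33, is_folding_model=True)
#     assert config.to_dict()["esmfold_config"]["trunk"]["num_blocks"] == 48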
| 5 | 0 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : int = logging.get_logger(__name__)
# TODO Update this
__A : Optional[Any] = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
lowercase = 'esm'
def __init__( self : Union[str, Any] , lowerCamelCase : Union[str, Any]=None , lowerCamelCase : int=None , lowerCamelCase : Any=None , lowerCamelCase : Union[str, Any]=7_68 , lowerCamelCase : int=12 , lowerCamelCase : Dict=12 , lowerCamelCase : Dict=30_72 , lowerCamelCase : int=0.1 , lowerCamelCase : Union[str, Any]=0.1 , lowerCamelCase : Dict=10_26 , lowerCamelCase : Dict=0.02 , lowerCamelCase : Dict=1E-12 , lowerCamelCase : Any="absolute" , lowerCamelCase : Any=True , lowerCamelCase : Tuple=None , lowerCamelCase : str=False , lowerCamelCase : Dict=False , lowerCamelCase : Tuple=None , lowerCamelCase : Optional[int]=None , **lowerCamelCase : Any , ) -> Tuple:
super().__init__(pad_token_id=lowerCamelCase , mask_token_id=lowerCamelCase , **lowerCamelCase )
lowerCAmelCase_ : Optional[int] = vocab_size
lowerCAmelCase_ : List[str] = hidden_size
lowerCAmelCase_ : int = num_hidden_layers
lowerCAmelCase_ : List[Any] = num_attention_heads
lowerCAmelCase_ : Optional[int] = intermediate_size
lowerCAmelCase_ : str = hidden_dropout_prob
lowerCAmelCase_ : int = attention_probs_dropout_prob
lowerCAmelCase_ : List[str] = max_position_embeddings
lowerCAmelCase_ : List[str] = initializer_range
lowerCAmelCase_ : str = layer_norm_eps
lowerCAmelCase_ : str = position_embedding_type
lowerCAmelCase_ : str = use_cache
lowerCAmelCase_ : Union[str, Any] = emb_layer_norm_before
lowerCAmelCase_ : Any = token_dropout
lowerCAmelCase_ : List[Any] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
lowerCAmelCase_ : Tuple = EsmFoldConfig()
elif isinstance(lowerCamelCase , lowerCamelCase ):
lowerCAmelCase_ : Tuple = EsmFoldConfig(**lowerCamelCase )
lowerCAmelCase_ : int = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
lowerCAmelCase_ : List[Any] = get_default_vocab_list()
else:
lowerCAmelCase_ : List[Any] = vocab_list
else:
lowerCAmelCase_ : Dict = None
lowerCAmelCase_ : Optional[int] = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , lowerCamelCase ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def __lowercase ( self : Dict ) -> List[str]:
lowerCAmelCase_ : Union[str, Any] = super().to_dict()
if isinstance(self.esmfold_config , lowerCamelCase ):
lowerCAmelCase_ : str = self.esmfold_config.to_dict()
return output
@dataclass
class __snake_case :
"""simple docstring"""
lowercase = None
lowercase = True
lowercase = False
lowercase = False
lowercase = False
lowercase = 0
lowercase = True
lowercase = False
lowercase = 1_28
lowercase = None
def __lowercase ( self : Any ) -> Union[str, Any]:
if self.trunk is None:
lowerCAmelCase_ : Union[str, Any] = TrunkConfig()
elif isinstance(self.trunk , lowerCamelCase ):
lowerCAmelCase_ : Tuple = TrunkConfig(**self.trunk )
def __lowercase ( self : List[str] ) -> Tuple:
lowerCAmelCase_ : Optional[Any] = asdict(self )
lowerCAmelCase_ : Optional[int] = self.trunk.to_dict()
return output
@dataclass
class __snake_case :
"""simple docstring"""
lowercase = 48
lowercase = 10_24
lowercase = 1_28
lowercase = 32
lowercase = 32
lowercase = 32
lowercase = 0
lowercase = 0
lowercase = False
lowercase = 4
lowercase = 1_28
lowercase = None
def __lowercase ( self : Union[str, Any] ) -> List[str]:
if self.structure_module is None:
lowerCAmelCase_ : int = StructureModuleConfig()
elif isinstance(self.structure_module , lowerCamelCase ):
lowerCAmelCase_ : List[Any] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F'`max_recycles` should be positive, got {self.max_recycles}.' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
                F' {self.sequence_state_dim} and {self.sequence_head_width}.' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
                F' {self.pairwise_state_dim} and {self.pairwise_head_width}.' )
lowerCAmelCase_ : int = self.sequence_state_dim // self.sequence_head_width
lowerCAmelCase_ : Dict = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
F' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
F' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.' )
if self.dropout >= 0.4:
raise ValueError(F'`dropout` should not be greater than 0.4, got {self.dropout}.' )
def __lowercase ( self : Dict ) -> Dict:
lowerCAmelCase_ : str = asdict(self )
lowerCAmelCase_ : int = self.structure_module.to_dict()
return output
@dataclass
class __snake_case :
"""simple docstring"""
lowercase = 3_84
lowercase = 1_28
lowercase = 16
lowercase = 1_28
lowercase = 12
lowercase = 4
lowercase = 8
lowercase = 0.1
lowercase = 8
lowercase = 1
lowercase = 2
lowercase = 7
lowercase = 10
lowercase = 1E-8
lowercase = 1E5
def __lowercase ( self : Dict ) -> List[Any]:
return asdict(self )
def UpperCamelCase_ ( ):
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
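# A minimal standalone sketch (hypothetical helper, not part of the original
# module) of the head-width arithmetic the trunk config validates above: a state
# dim is usable only if it factors exactly as num_heads * head_width, so the
# attention projections can be reshaped into heads.
def _split_heads_example(state_dim: int, head_width: int) -> int:
    num_heads = state_dim // head_width
    if state_dim != num_heads * head_width:
        raise ValueError(f"{state_dim} is not a multiple of {head_width}")
    return num_heads


assert _split_heads_example(1024, 32) == 32  # matches the sequence-track defaults above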
| 120 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
UpperCAmelCase__ = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
class lowerCamelCase__ ( lowerCAmelCase):
def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=1 ) -> Dict:
_lowercase =tokenizer
_lowercase =dataset
_lowercase =len(UpperCAmelCase ) if n_tasks is None else n_tasks
_lowercase =n_copies
def __iter__(self ) -> Optional[Any]:
_lowercase =[]
for task in range(self.n_tasks ):
            # without strip(), the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
_lowercase =self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class lowerCamelCase__ ( lowerCAmelCase):
def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
_lowercase =start_length
_lowercase =eof_strings
_lowercase =tokenizer
def __call__(self , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Dict:
_lowercase =self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
_lowercase =[]
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(UpperCAmelCase )
def UpperCAmelCase_ ( __snake_case ) -> Optional[Any]:
"""simple docstring"""
_lowercase =re.split('''(%s)''' % '''|'''.join(__snake_case ) , __snake_case )
# last string should be ""
return "".join(string_list[:-2] )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=20 , **__snake_case ) -> Tuple:
"""simple docstring"""
_lowercase =defaultdict(__snake_case ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__snake_case ) ):
with torch.no_grad():
_lowercase =batch['''ids'''].shape[-1]
_lowercase =accelerator.unwrap_model(__snake_case ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=__snake_case , **__snake_case )
# each task is generated batch_size times
_lowercase =batch['''task_id'''].repeat(__snake_case )
_lowercase =accelerator.pad_across_processes(
__snake_case , dim=1 , pad_index=tokenizer.pad_token_id )
_lowercase , _lowercase =accelerator.gather((generated_tokens, generated_tasks) )
_lowercase =generated_tokens.cpu().numpy()
_lowercase =generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__snake_case , __snake_case ):
gen_token_dict[task].append(__snake_case )
_lowercase =[[] for _ in range(__snake_case )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
_lowercase =tokenizer.decode(__snake_case , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case )
code_gens[task].append(remove_last_block(__snake_case ) )
return code_gens
def UpperCAmelCase_ ( ) -> str:
"""simple docstring"""
_lowercase =HfArgumentParser(__snake_case )
_lowercase =parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
    os.environ['''HF_ALLOW_CODE_EVAL'''] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
    os.environ['''TOKENIZERS_PARALLELISM'''] = '''false'''
if args.num_workers is None:
_lowercase =multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
_lowercase =Accelerator()
set_seed(args.seed , device_specific=__snake_case )
# Load model and tokenizer
_lowercase =AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
_lowercase =AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
_lowercase ={
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , __snake_case , __snake_case )] ),
}
# Load evaluation dataset and metric
_lowercase =load_dataset('''openai_humaneval''' )
_lowercase =load_metric('''code_eval''' )
_lowercase =args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
_lowercase =args.n_samples // args.batch_size
_lowercase =TokenizedDataset(__snake_case , human_eval['''test'''] , n_copies=__snake_case , n_tasks=__snake_case )
    # note: args.batch_size is actually num_return_sequences per prompt; the DataLoader batch size stays 1
_lowercase =DataLoader(__snake_case , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
_lowercase =code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
_lowercase , _lowercase =accelerator.prepare(__snake_case , __snake_case )
_lowercase =complete_code(
__snake_case , __snake_case , __snake_case , __snake_case , n_tasks=__snake_case , batch_size=args.batch_size , **__snake_case , )
if accelerator.is_main_process:
_lowercase =[]
for task in tqdm(range(__snake_case ) ):
_lowercase =human_eval['''test'''][task]['''test''']
_lowercase =F"check({human_eval['test'][task]['entry_point']})"
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
_lowercase , _lowercase =code_eval_metric.compute(
references=__snake_case , predictions=__snake_case , num_workers=args.num_workers )
print(F"Results: {pass_at_k}" )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(__snake_case , __snake_case )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 5 | 0 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__UpperCAmelCase : Any = logging.get_logger(__name__)
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : int , *A : Optional[int] , **A : Tuple ):
warnings.warn(
"""The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use FlavaImageProcessor instead.""" , A , )
super().__init__(*A , **A )
| 111 |
UpperCAmelCase__ = 8.31_44_62 # Unit - J mol-1 K-1
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
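# Worked examples (values follow directly from the formulas above):
#   pressure of 2 mol at 100 K in 5 m^3: 2 * 100 * 8.314462 / 5 = 332.57848 Pa
#   volume of 2 mol at 100 K under 5 Pa:  2 * 100 * 8.314462 / 5 = 332.57848 m^3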
if __name__ == "__main__":
from doctest import testmod
testmod()
| 5 | 0 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCAmelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=3_2 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=[1_0, 2_0, 3_0, 4_0] , lowerCAmelCase__=[2, 2, 3, 2] , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=3_7 , lowerCAmelCase__="gelu" , lowerCAmelCase__=1_0 , lowerCAmelCase__=0.02 , lowerCAmelCase__=["stage2", "stage3", "stage4"] , lowerCAmelCase__=[2, 3, 4] , lowerCAmelCase__=None , ) -> Optional[int]:
'''simple docstring'''
a__ : Tuple =parent
a__ : List[str] =batch_size
a__ : str =image_size
a__ : Dict =num_channels
a__ : Any =num_stages
a__ : int =hidden_sizes
a__ : Optional[int] =depths
a__ : List[str] =is_training
a__ : Union[str, Any] =use_labels
a__ : str =intermediate_size
a__ : List[str] =hidden_act
a__ : Optional[Any] =num_labels
a__ : List[Any] =initializer_range
a__ : Optional[Any] =out_features
a__ : Any =out_indices
a__ : List[str] =scope
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
a__ : Dict =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a__ : Optional[Any] =None
if self.use_labels:
a__ : Optional[int] =ids_tensor([self.batch_size] , self.num_labels )
a__ : Dict =self.get_config()
return config, pixel_values, labels
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
'''simple docstring'''
a__ : List[Any] =ConvNextVaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a__ : Tuple =model(lowerCAmelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
a__ : Optional[int] =ConvNextVaForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a__ : Dict =model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
a__ : Dict =ConvNextVaBackbone(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a__ : Dict =model(lowerCAmelCase__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
a__ : str =None
a__ : int =ConvNextVaBackbone(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a__ : int =model(lowerCAmelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : Dict =self.prepare_config_and_inputs()
a__ , a__ , a__ : int =config_and_inputs
a__ : Dict ={"pixel_values": pixel_values}
return config, inputs_dict
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
a__ : str =self.prepare_config_and_inputs()
a__ , a__ , a__ : Optional[Any] =config_and_inputs
a__ : Union[str, Any] ={"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase):
_lowercase : str = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
_lowercase : Any = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
_lowercase : List[str] = False
_lowercase : List[str] = False
_lowercase : List[str] = False
_lowercase : List[str] = False
_lowercase : List[str] = False
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
a__ : Optional[int] =ConvNextVaModelTester(self )
a__ : str =ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=3_7 )
def _lowercase ( self ) -> str:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def _lowercase ( self ) -> Any:
'''simple docstring'''
pass
def _lowercase ( self ) -> Dict:
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
a__ , a__ : str =self.model_tester.prepare_config_and_inputs_with_labels()
a__ : List[str] =True
if model_class.__name__ in [
*get_values(lowerCAmelCase__ ),
*get_values(lowerCAmelCase__ ),
]:
continue
a__ : Any =model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
a__ : Tuple =self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
a__ : Tuple =model(**lowerCAmelCase__ ).loss
loss.backward()
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
a__ , a__ : Dict =self.model_tester.prepare_config_and_inputs_with_labels()
a__ : Union[str, Any] =False
a__ : List[str] =True
if (
model_class.__name__
in [*get_values(lowerCAmelCase__ ), *get_values(lowerCAmelCase__ )]
or not model_class.supports_gradient_checkpointing
):
continue
a__ : List[Any] =model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.gradient_checkpointing_enable()
model.train()
a__ : Dict =self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
a__ : List[Any] =model(**lowerCAmelCase__ ).loss
loss.backward()
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ , a__ : int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : List[Any] =model_class(lowerCAmelCase__ )
a__ : Any =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : Dict =[*signature.parameters.keys()]
a__ : int =["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
a__ : Optional[int] =model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
a__ : List[Any] =model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
a__ : Optional[Any] =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a__ : Any =self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase__ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
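        # with the tester defaults above (image_size=32), the first reported
        # feature map is image_size // 4 = 8 pixels per side; ConvNeXt-style
        # backbones then halve the spatial resolution at each later stage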
a__ , a__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : str =True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a__ : Any =True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def _lowercase ( self ) -> int:
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Union[str, Any] =ConvNextVaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def _A ( ):
"""simple docstring"""
a__ : Optional[Any] =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase):
@cached_property
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : Optional[Any] =ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(lowerCAmelCase__ )
a__ : Any =self.default_image_processor
a__ : Any =prepare_img()
a__ : str =preprocessor(images=lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
a__ : List[str] =model(**lowerCAmelCase__ )
# verify the logits
a__ : str =torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
a__ : str =torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
| 95 |
from __future__ import annotations
from collections.abc import Callable
UpperCAmelCase__ = list[list[float | int]]
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> Matrix:
"""simple docstring"""
_lowercase =len(__snake_case )
_lowercase =[[0 for _ in range(size + 1 )] for _ in range(__snake_case )]
_lowercase =42
_lowercase =42
_lowercase =42
_lowercase =42
_lowercase =42
_lowercase =42
for row in range(__snake_case ):
for col in range(__snake_case ):
_lowercase =matrix[row][col]
_lowercase =vector[row][0]
_lowercase =0
_lowercase =0
while row < size and col < size:
# pivoting
_lowercase =max((abs(augmented[rowa][col] ), rowa) for rowa in range(__snake_case , __snake_case ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_lowercase , _lowercase =augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , __snake_case ):
_lowercase =augmented[rowa][col] / augmented[row][col]
_lowercase =0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , __snake_case ):
for row in range(__snake_case ):
_lowercase =augmented[row][col] / augmented[col][col]
for cola in range(__snake_case , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__snake_case )
]
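# Quick sanity check (hypothetical system): for x + y = 3 and x - y = 1 the
# elimination above returns x = 2, y = 1, i.e.
# solve([[1, 1], [1, -1]], [[3], [1]]) -> [[2.0], [1.0]]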
def UpperCAmelCase_ ( __snake_case ) -> Callable[[int], int]:
"""simple docstring"""
_lowercase =len(__snake_case )
_lowercase =[[0 for _ in range(__snake_case )] for _ in range(__snake_case )]
_lowercase =[[0] for _ in range(__snake_case )]
_lowercase =42
_lowercase =42
_lowercase =42
_lowercase =42
for x_val, y_val in enumerate(__snake_case ):
for col in range(__snake_case ):
_lowercase =(x_val + 1) ** (size - col - 1)
_lowercase =y_val
_lowercase =solve(__snake_case , __snake_case )
def interpolated_func(__snake_case ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(__snake_case ) )
return interpolated_func
def UpperCAmelCase_ ( __snake_case ) -> int:
"""simple docstring"""
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
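# Spot values (direct evaluation of the alternating series above):
# u(1) = 1 and u(2) = (1 - (-2) ** 11) / (1 - (-2)) = 683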
def UpperCAmelCase_ ( __snake_case = question_function , __snake_case = 10 ) -> int:
"""simple docstring"""
_lowercase =[func(__snake_case ) for x_val in range(1 , order + 1 )]
_lowercase =[
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_lowercase =0
_lowercase =42
_lowercase =42
for poly in polynomials:
_lowercase =1
while func(__snake_case ) == poly(__snake_case ):
x_val += 1
ret += poly(__snake_case )
return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
| 5 | 0 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _a ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : str=0.9_9_9 , SCREAMING_SNAKE_CASE : Any="cosine" , ) -> Union[str, Any]:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(SCREAMING_SNAKE_CASE : Optional[Any] ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(SCREAMING_SNAKE_CASE : Union[str, Any] ):
return math.exp(t * -1_2.0 )
else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
__lowerCAmelCase: Optional[Any] = []
for i in range(__snake_case ):
__lowerCAmelCase: Union[str, Any] = i / num_diffusion_timesteps
__lowerCAmelCase: Optional[int] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__snake_case ) / alpha_bar_fn(__snake_case ) , __snake_case ) )
return torch.tensor(__snake_case , dtype=torch.floataa )
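# A readable restatement (hypothetical names, identical formula) of the cosine
# beta schedule implemented above: alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2,
# with each beta clipped at max_beta so no single step removes all signal.
def _cosine_betas_example(num_steps: int, max_beta: float = 0.999) -> torch.Tensor:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_steps):
        t_now, t_next = i / num_steps, (i + 1) / num_steps
        betas.append(min(1 - alpha_bar(t_next) / alpha_bar(t_now), max_beta))
    return torch.tensor(betas, dtype=torch.float32)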
class A_ ( snake_case__ , snake_case__ ):
_lowercase : int = [e.name for e in KarrasDiffusionSchedulers]
_lowercase : List[str] = 2
@register_to_config
def __init__( self : Optional[Any] , UpperCAmelCase : List[Any] = 1_0_0_0 , UpperCAmelCase : Optional[int] = 0.00085 , UpperCAmelCase : int = 0.012 , UpperCAmelCase : Union[str, Any] = "linear" , UpperCAmelCase : int = None , UpperCAmelCase : List[str] = "epsilon" , UpperCAmelCase : int = False , UpperCAmelCase : List[str] = False , UpperCAmelCase : Optional[int] = 1.0 , UpperCAmelCase : Any = "linspace" , UpperCAmelCase : List[Any] = 0 , ) -> List[str]:
if trained_betas is not None:
__lowerCAmelCase: Tuple = torch.tensor(UpperCAmelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
__lowerCAmelCase: List[str] = torch.linspace(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__lowerCAmelCase: str = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCAmelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__lowerCAmelCase: List[str] = betas_for_alpha_bar(UpperCAmelCase , alpha_transform_type='cosine' )
elif beta_schedule == "exp":
__lowerCAmelCase: List[Any] = betas_for_alpha_bar(UpperCAmelCase , alpha_transform_type='exp' )
else:
raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' )
__lowerCAmelCase: List[Any] = 1.0 - self.betas
__lowerCAmelCase: Optional[Any] = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = use_karras_sigmas
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : List[str]=None ) -> Dict:
if schedule_timesteps is None:
__lowerCAmelCase: Dict = self.timesteps
__lowerCAmelCase: Tuple = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__lowerCAmelCase: Optional[int] = 1 if len(UpperCAmelCase ) > 1 else 0
else:
__lowerCAmelCase: Dict = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase ) else timestep
__lowerCAmelCase: List[str] = self._index_counter[timestep_int]
return indices[pos].item()
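    # e.g. set_timesteps duplicates every interior timestep via repeat_interleave,
    # so a lookup for a repeated value resolves to its second occurrence on the
    # very first step and is disambiguated by _index_counter afterwards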
@property
def UpperCAmelCase ( self : Dict ) -> str:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCAmelCase ( self : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , ) -> torch.FloatTensor:
__lowerCAmelCase: List[str] = self.index_for_timestep(UpperCAmelCase )
__lowerCAmelCase: Dict = self.sigmas[step_index]
__lowerCAmelCase: str = sample / ((sigma**2 + 1) ** 0.5)
return sample
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any = None , UpperCAmelCase : Union[str, Any] = None , ) -> int:
__lowerCAmelCase: Union[str, Any] = num_inference_steps
__lowerCAmelCase: List[str] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__lowerCAmelCase: List[str] = np.linspace(0 , num_train_timesteps - 1 , UpperCAmelCase , dtype=UpperCAmelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__lowerCAmelCase: Tuple = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
__lowerCAmelCase: List[str] = (np.arange(0 , UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(UpperCAmelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__lowerCAmelCase: List[Any] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
__lowerCAmelCase: Any = (np.arange(UpperCAmelCase , 0 , -step_ratio )).round().copy().astype(UpperCAmelCase )
timesteps -= 1
else:
raise ValueError(
F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
__lowerCAmelCase: str = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__lowerCAmelCase: Union[str, Any] = np.log(UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = np.interp(UpperCAmelCase , np.arange(0 , len(UpperCAmelCase ) ) , UpperCAmelCase )
if self.config.use_karras_sigmas:
__lowerCAmelCase: Any = self._convert_to_karras(in_sigmas=UpperCAmelCase , num_inference_steps=self.num_inference_steps )
__lowerCAmelCase: List[Any] = np.array([self._sigma_to_t(UpperCAmelCase , UpperCAmelCase ) for sigma in sigmas] )
__lowerCAmelCase: List[str] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__lowerCAmelCase: List[Any] = torch.from_numpy(UpperCAmelCase ).to(device=UpperCAmelCase )
__lowerCAmelCase: List[Any] = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
__lowerCAmelCase: List[Any] = torch.from_numpy(UpperCAmelCase )
__lowerCAmelCase: Optional[int] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(UpperCAmelCase ).startswith('mps' ):
# mps does not support float64
__lowerCAmelCase: str = timesteps.to(UpperCAmelCase , dtype=torch.floataa )
else:
__lowerCAmelCase: str = timesteps.to(device=UpperCAmelCase )
# empty dt and derivative
__lowerCAmelCase: Optional[int] = None
__lowerCAmelCase: Dict = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__lowerCAmelCase: Any = defaultdict(UpperCAmelCase )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Any ) -> Optional[int]:
# get log sigma
__lowerCAmelCase: Optional[int] = np.log(UpperCAmelCase )
# get distribution
__lowerCAmelCase: Optional[int] = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
__lowerCAmelCase: Dict = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
__lowerCAmelCase: Dict = low_idx + 1
__lowerCAmelCase: str = log_sigmas[low_idx]
__lowerCAmelCase: str = log_sigmas[high_idx]
# interpolate sigmas
__lowerCAmelCase: int = (low - log_sigma) / (low - high)
__lowerCAmelCase: Dict = np.clip(UpperCAmelCase , 0 , 1 )
# transform interpolation to time range
__lowerCAmelCase: List[Any] = (1 - w) * low_idx + w * high_idx
__lowerCAmelCase: Optional[int] = t.reshape(sigma.shape )
return t
def UpperCAmelCase ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Tuple ) -> torch.FloatTensor:
__lowerCAmelCase: int = in_sigmas[-1].item()
__lowerCAmelCase: Optional[Any] = in_sigmas[0].item()
__lowerCAmelCase: str = 7.0 # 7.0 is the value used in the paper
__lowerCAmelCase: List[Any] = np.linspace(0 , 1 , UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = sigma_min ** (1 / rho)
__lowerCAmelCase: int = sigma_max ** (1 / rho)
__lowerCAmelCase: Union[str, Any] = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
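    # note: in the Karras ramp above, ramp = 0 recovers sigma_max and ramp = 1
    # recovers sigma_min exactly; rho = 7 biases the spacing toward small sigmas,
    # spending more steps at the low-noise end of the schedule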
@property
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
return self.dt is None
def UpperCAmelCase ( self : str , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : int = True , ) -> Union[SchedulerOutput, Tuple]:
__lowerCAmelCase: Any = self.index_for_timestep(UpperCAmelCase )
# advance index counter by 1
__lowerCAmelCase: List[Any] = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__lowerCAmelCase: List[str] = self.sigmas[step_index]
__lowerCAmelCase: Optional[Any] = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
__lowerCAmelCase: List[Any] = self.sigmas[step_index - 1]
__lowerCAmelCase: Tuple = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__lowerCAmelCase: Dict = 0
__lowerCAmelCase: Any = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__lowerCAmelCase: Optional[Any] = sigma_hat if self.state_in_first_order else sigma_next
__lowerCAmelCase: Tuple = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__lowerCAmelCase: Tuple = sigma_hat if self.state_in_first_order else sigma_next
__lowerCAmelCase: str = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
__lowerCAmelCase: int = model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.config.clip_sample:
__lowerCAmelCase: Dict = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__lowerCAmelCase: int = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__lowerCAmelCase: int = sigma_next - sigma_hat
# store for 2nd order step
__lowerCAmelCase: Any = derivative
__lowerCAmelCase: List[Any] = dt
__lowerCAmelCase: Tuple = sample
else:
# 2. 2nd order / Heun's method
__lowerCAmelCase: str = (sample - pred_original_sample) / sigma_next
__lowerCAmelCase: Tuple = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
__lowerCAmelCase: Union[str, Any] = self.dt
__lowerCAmelCase: List[str] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
__lowerCAmelCase: List[Any] = None
__lowerCAmelCase: List[Any] = None
__lowerCAmelCase: Optional[int] = None
__lowerCAmelCase: Dict = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCAmelCase )
def UpperCAmelCase ( self : str , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__lowerCAmelCase: Union[str, Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(UpperCAmelCase ):
# mps does not support float64
__lowerCAmelCase: str = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__lowerCAmelCase: int = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__lowerCAmelCase: List[str] = self.timesteps.to(original_samples.device )
__lowerCAmelCase: Optional[int] = timesteps.to(original_samples.device )
__lowerCAmelCase: Tuple = [self.index_for_timestep(UpperCAmelCase , UpperCAmelCase ) for t in timesteps]
__lowerCAmelCase: str = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__lowerCAmelCase: List[Any] = sigma.unsqueeze(-1 )
__lowerCAmelCase: Optional[Any] = original_samples + noise * sigma
return noisy_samples
def __len__( self : Optional[int] ) -> Tuple:
return self.config.num_train_timesteps
| 322 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 | 0 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : int = logging.get_logger(__name__)
__snake_case : Any = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class A__(a_ ):
"""simple docstring"""
_A : Dict = '''data2vec-audio'''
def __init__( self , _lowercase=32 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3_072 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.0 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.0_2 , _lowercase=1e-5 , _lowercase="gelu" , _lowercase=(512, 512, 512, 512, 512, 512, 512) , _lowercase=(5, 2, 2, 2, 2, 2, 2) , _lowercase=(10, 3, 3, 3, 3, 2, 2) , _lowercase=False , _lowercase=16 , _lowercase=19 , _lowercase=5 , _lowercase=0.0_5 , _lowercase=10 , _lowercase=2 , _lowercase=0.0 , _lowercase=10 , _lowercase=0 , _lowercase="sum" , _lowercase=False , _lowercase=False , _lowercase=256 , _lowercase=(512, 512, 512, 512, 1_500) , _lowercase=(5, 3, 3, 1, 1) , _lowercase=(1, 2, 3, 1, 1) , _lowercase=512 , _lowercase=0 , _lowercase=1 , _lowercase=2 , _lowercase=False , _lowercase=3 , _lowercase=2 , _lowercase=3 , _lowercase=None , **_lowercase , ) -> List[Any]:
super().__init__(**_lowercase , pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase )
a_ : Union[str, Any] = hidden_size
a_ : Tuple = feat_extract_activation
a_ : Union[str, Any] = list(_lowercase )
a_ : Tuple = list(_lowercase )
a_ : Optional[Any] = list(_lowercase )
a_ : int = conv_bias
a_ : int = num_conv_pos_embeddings
a_ : Optional[Any] = num_conv_pos_embedding_groups
a_ : Tuple = conv_pos_kernel_size
a_ : List[str] = len(self.conv_dim )
a_ : List[Any] = num_hidden_layers
a_ : str = intermediate_size
a_ : Union[str, Any] = hidden_act
a_ : int = num_attention_heads
a_ : str = hidden_dropout
a_ : Any = attention_dropout
a_ : Tuple = activation_dropout
a_ : Tuple = feat_proj_dropout
a_ : Optional[Any] = final_dropout
a_ : Tuple = layerdrop
a_ : Any = layer_norm_eps
a_ : int = initializer_range
a_ : Dict = vocab_size
a_ : Dict = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a_ : Any = mask_time_prob
a_ : Tuple = mask_time_length
a_ : Optional[int] = mask_time_min_masks
a_ : List[str] = mask_feature_prob
a_ : Optional[Any] = mask_feature_length
a_ : Union[str, Any] = mask_feature_min_masks
# ctc loss
a_ : Dict = ctc_loss_reduction
a_ : List[str] = ctc_zero_infinity
# adapter
a_ : int = add_adapter
a_ : Optional[Any] = adapter_kernel_size
a_ : int = adapter_stride
a_ : Dict = num_adapter_layers
a_ : int = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
a_ : int = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
a_ : int = list(_lowercase )
a_ : Union[str, Any] = list(_lowercase )
a_ : Optional[Any] = list(_lowercase )
a_ : Tuple = xvector_output_dim
@property
def UpperCamelCase__ ( self ) -> int:
return math.prod(self.conv_stride )
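    # with the default conv_stride (5, 2, 2, 2, 2, 2, 2) this ratio is
    # 5 * 2 ** 6 = 320, i.e. one encoder frame per 320 waveform samples
    # (20 ms of audio at a 16 kHz sampling rate)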
| 248 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ = {
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ['''EfficientNetImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
'''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientNetForImageClassification''',
'''EfficientNetModel''',
'''EfficientNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 5 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A : str = logging.get_logger(__name__)
def lowercase_ ( _A : List[Any] ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
lowerCamelCase__ : Dict = [144, 192, 240]
lowerCamelCase__ : List[Any] = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
lowerCamelCase__ : Union[str, Any] = [96, 120, 144]
lowerCamelCase__ : Union[str, Any] = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
lowerCamelCase__ : Optional[int] = [64, 80, 96]
lowerCamelCase__ : List[str] = [16, 16, 24, 48, 64, 80, 320]
lowerCamelCase__ : str = 0.05
lowerCamelCase__ : int = 2.0
if mobilevit_name.startswith("deeplabv3_" ):
lowerCamelCase__ : int = 512
lowerCamelCase__ : int = 16
lowerCamelCase__ : Any = 21
lowerCamelCase__ : Union[str, Any] = "pascal-voc-id2label.json"
else:
lowerCamelCase__ : Dict = 1000
lowerCamelCase__ : Optional[Any] = "imagenet-1k-id2label.json"
lowerCamelCase__ : str = "huggingface/label-files"
lowerCamelCase__ : List[str] = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="dataset" ) , "r" ) )
lowerCamelCase__ : List[Any] = {int(__snake_case ): v for k, v in idalabel.items()}
lowerCamelCase__ : Dict = idalabel
lowerCamelCase__ : List[str] = {v: k for k, v in idalabel.items()}
return config
def lowercase_ ( _A : List[str] , _A : Tuple=False ):
"""simple docstring"""
for i in range(1 , 6 ):
if F"layer_{i}." in name:
lowerCamelCase__ : Any = name.replace(F"layer_{i}." , F"encoder.layer.{i - 1}." )
if "conv_1." in name:
lowerCamelCase__ : Tuple = name.replace("conv_1." , "conv_stem." )
if ".block." in name:
lowerCamelCase__ : List[str] = name.replace(".block." , "." )
if "exp_1x1" in name:
lowerCamelCase__ : List[Any] = name.replace("exp_1x1" , "expand_1x1" )
if "red_1x1" in name:
lowerCamelCase__ : int = name.replace("red_1x1" , "reduce_1x1" )
if ".local_rep.conv_3x3." in name:
lowerCamelCase__ : Dict = name.replace(".local_rep.conv_3x3." , ".conv_kxk." )
if ".local_rep.conv_1x1." in name:
lowerCamelCase__ : Union[str, Any] = name.replace(".local_rep.conv_1x1." , ".conv_1x1." )
if ".norm." in name:
lowerCamelCase__ : Union[str, Any] = name.replace(".norm." , ".normalization." )
if ".conv." in name:
lowerCamelCase__ : Optional[Any] = name.replace(".conv." , ".convolution." )
if ".conv_proj." in name:
lowerCamelCase__ : Optional[int] = name.replace(".conv_proj." , ".conv_projection." )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F".{i}.{j}." in name:
lowerCamelCase__ : int = name.replace(F".{i}.{j}." , F".{i}.layer.{j}." )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F".{i}.{j}." in name:
lowerCamelCase__ : Optional[Any] = name.replace(F".{i}.{j}." , F".{i}." )
if "expand_1x1" in name:
lowerCamelCase__ : Tuple = name.replace("expand_1x1" , "downsampling_layer.expand_1x1" )
if "conv_3x3" in name:
lowerCamelCase__ : List[Any] = name.replace("conv_3x3" , "downsampling_layer.conv_3x3" )
if "reduce_1x1" in name:
lowerCamelCase__ : Optional[Any] = name.replace("reduce_1x1" , "downsampling_layer.reduce_1x1" )
for i in range(2 , 5 ):
if F".global_rep.{i}.weight" in name:
lowerCamelCase__ : List[Any] = name.replace(F".global_rep.{i}.weight" , ".layernorm.weight" )
if F".global_rep.{i}.bias" in name:
lowerCamelCase__ : int = name.replace(F".global_rep.{i}.bias" , ".layernorm.bias" )
if ".global_rep." in name:
lowerCamelCase__ : Dict = name.replace(".global_rep." , ".transformer." )
if ".pre_norm_mha.0." in name:
lowerCamelCase__ : Union[str, Any] = name.replace(".pre_norm_mha.0." , ".layernorm_before." )
if ".pre_norm_mha.1.out_proj." in name:
lowerCamelCase__ : List[Any] = name.replace(".pre_norm_mha.1.out_proj." , ".attention.output.dense." )
if ".pre_norm_ffn.0." in name:
lowerCamelCase__ : List[str] = name.replace(".pre_norm_ffn.0." , ".layernorm_after." )
if ".pre_norm_ffn.1." in name:
lowerCamelCase__ : Dict = name.replace(".pre_norm_ffn.1." , ".intermediate.dense." )
if ".pre_norm_ffn.4." in name:
lowerCamelCase__ : List[str] = name.replace(".pre_norm_ffn.4." , ".output.dense." )
if ".transformer." in name:
lowerCamelCase__ : Dict = name.replace(".transformer." , ".transformer.layer." )
if ".aspp_layer." in name:
lowerCamelCase__ : Dict = name.replace(".aspp_layer." , "." )
if ".aspp_pool." in name:
lowerCamelCase__ : Optional[int] = name.replace(".aspp_pool." , "." )
if "seg_head." in name:
lowerCamelCase__ : Optional[Any] = name.replace("seg_head." , "segmentation_head." )
if "segmentation_head.classifier.classifier." in name:
lowerCamelCase__ : Union[str, Any] = name.replace("segmentation_head.classifier.classifier." , "segmentation_head.classifier." )
if "classifier.fc." in name:
lowerCamelCase__ : Optional[Any] = name.replace("classifier.fc." , "classifier." )
elif (not base_model) and ("segmentation_head." not in name):
lowerCamelCase__ : Dict = "mobilevit." + name
return name
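# Example (hypothetical checkpoint key): "conv_1.block.norm.weight" becomes
# "conv_stem.norm.weight" after the conv_1/.block. rules, then
# "conv_stem.normalization.weight", and finally
# "mobilevit.conv_stem.normalization.weight" once the model prefix is applied.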
def lowercase_ ( _A : Tuple , _A : str , _A : List[Any]=False ):
"""simple docstring"""
if base_model:
lowerCamelCase__ : Optional[int] = ""
else:
lowerCamelCase__ : Optional[Any] = "mobilevit."
for key in orig_state_dict.copy().keys():
lowerCamelCase__ : Tuple = orig_state_dict.pop(__snake_case )
if key[:8] == "encoder.":
lowerCamelCase__ : Union[str, Any] = key[8:]
if "qkv" in key:
lowerCamelCase__ : Optional[Any] = key.split("." )
lowerCamelCase__ : List[Any] = int(key_split[0][6:] ) - 1
lowerCamelCase__ : Dict = int(key_split[3] )
lowerCamelCase__ : Optional[Any] = model.get_submodule(F"{model_prefix}encoder.layer.{layer_num}" )
lowerCamelCase__ : Optional[Any] = layer.transformer.layer[transformer_num].attention.attention.all_head_size
lowerCamelCase__ : Optional[Any] = (
F"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
)
if "weight" in key:
lowerCamelCase__ : Any = val[:dim, :]
lowerCamelCase__ : Optional[int] = val[dim : dim * 2, :]
lowerCamelCase__ : Any = val[-dim:, :]
else:
lowerCamelCase__ : List[str] = val[:dim]
lowerCamelCase__ : Optional[Any] = val[dim : dim * 2]
lowerCamelCase__ : Optional[Any] = val[-dim:]
else:
lowerCamelCase__ : Dict = val
return orig_state_dict
def lowercase_ ( ):
"""simple docstring"""
lowerCamelCase__ : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase__ : int = Image.open(requests.get(__snake_case , stream=__snake_case ).raw )
return im
@torch.no_grad()
def lowercase_ ( _A : Dict , _A : Dict , _A : Optional[int] , _A : Optional[int]=False ):
"""simple docstring"""
lowerCamelCase__ : List[Any] = get_mobilevit_config(__snake_case )
# load original state_dict
lowerCamelCase__ : str = torch.load(__snake_case , map_location="cpu" )
# load 🤗 model
if mobilevit_name.startswith("deeplabv3_" ):
lowerCamelCase__ : Union[str, Any] = MobileViTForSemanticSegmentation(__snake_case ).eval()
else:
lowerCamelCase__ : List[Any] = MobileViTForImageClassification(__snake_case ).eval()
lowerCamelCase__ : Tuple = convert_state_dict(__snake_case , __snake_case )
model.load_state_dict(__snake_case )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowerCamelCase__ : Any = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowerCamelCase__ : Any = image_processor(images=prepare_img() , return_tensors="pt" )
lowerCamelCase__ : Tuple = model(**__snake_case )
lowerCamelCase__ : int = outputs.logits
if mobilevit_name.startswith("deeplabv3_" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
lowerCamelCase__ : Any = torch.tensor(
[
[[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
[[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
[[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
lowerCamelCase__ : List[str] = torch.tensor(
[
[[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
[[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
[[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
lowerCamelCase__ : Union[str, Any] = torch.tensor(
[
[[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
[[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
[[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
] )
else:
raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
assert torch.allclose(logits[0, :3, :3, :3] , __snake_case , atol=1E-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
lowerCamelCase__ : int = torch.tensor([-0.9_866, 0.2_392, -1.1_241] )
elif mobilevit_name == "mobilevit_xs":
lowerCamelCase__ : Optional[Any] = torch.tensor([-2.4_761, -0.9_399, -1.9_587] )
elif mobilevit_name == "mobilevit_xxs":
lowerCamelCase__ : Optional[Any] = torch.tensor([-1.9_364, -1.2_327, -0.4_653] )
else:
raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
assert torch.allclose(logits[0, :3] , __snake_case , atol=1E-4 )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--mobilevit_name",
default="mobilevit_s",
type=str,
        help=(
            "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
            " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
        ),
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
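# Example invocation (the script filename and local paths below are
# illustrative, not part of this file):
#
#   python convert_mobilevit_checkpoint.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small \
#       --push_to_hub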
| 184 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
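# With this lazy setup, importing the package stays cheap: the torch-backed
# symbols are only materialized on first attribute access. A quick sanity check
# (assuming torch is installed):
#
#   from transformers import TimesformerConfig, TimesformerModel
#   model = TimesformerModel(TimesformerConfig())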
| 5 | 0 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
# Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
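    # The two compositions above are left unimplemented in the original; a
    # minimal sketch, assuming skfuzzy's relational helpers are available in the
    # installed version, could be:
    #
    #   R = np.outer(young, middle_aged)  # a toy fuzzy relation on X x X
    #   S = np.outer(middle_aged, young)
    #   T_maxmin = fuzz.maxmin_composition(R, S)
    #   T_maxprod = fuzz.maxprod_composition(R, S)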
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 313 |
def actual_power(a, b):
    """Divide-and-conquer exponentiation; int(b / 2) truncates toward zero, so
    the recursion also terminates for negative exponents."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a, b) -> float:
    """Exponentiation that maps a negative exponent to the reciprocal."""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
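# Worked example: power(-2, -3) == 1 / actual_power(-2, -3) == 1 / (-8) == -0.125,
# matching (-2) ** -3.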
| 5 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"tokenizer_file": {
"bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
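# Minimal usage sketch (fetches tokenizer files from the hub on first call):
#
#   tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#   print(tokenizer("Hello world")["input_ids"])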
| 51 |
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, )
                for _ in range(2)
            ]
        )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
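# Pipelines steer the blend at inference time by mutating the attributes set in
# __init__ above, e.g. `model.mix_ratio = 0.7` weights the first condition's
# branch more heavily, while `condition_lengths` tells forward() where to split
# the concatenated context tokens between the two transformers.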
| 5 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
    from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class Mask2FormerModelTester:
    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 8, max_size=32 * 8, num_labels=4, hidden_dim=64):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)
    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim), )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_mask2former_instance_segmentation_head_model(self, config, pixel_values, pixel_mask, mask_labels, class_labels):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels)
            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)
    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=True)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))
    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4))
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))], segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)], return_tensors="pt", )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
| 73 |
import heapq as hq
import math
from collections.abc import Iterator
class lowerCamelCase__ :
def __init__(self , UpperCAmelCase ) -> Any:
_lowercase =str(id_ )
_lowercase =None
_lowercase =None
_lowercase =[]
_lowercase ={} # {vertex:distance}
def __lt__(self , UpperCAmelCase ) -> List[str]:
return self.key < other.key
def __repr__(self ) -> str:
return self.id
def __A (self , UpperCAmelCase ) -> Dict:
self.neighbors.append(UpperCAmelCase )
def __A (self , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
_lowercase =weight
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case ) -> List[str]:
"""simple docstring"""
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , __snake_case )
graph[b - 1].add_edge(graph[a - 1] , __snake_case )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> list:
"""simple docstring"""
_lowercase =[]
for u in graph:
_lowercase =math.inf
_lowercase =None
_lowercase =0
_lowercase =graph[:]
while q:
_lowercase =min(__snake_case )
q.remove(__snake_case )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
_lowercase =u
_lowercase =u.edges[v.id]
for i in range(1 , len(__snake_case ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> Iterator[tuple]:
"""simple docstring"""
for u in graph:
_lowercase =math.inf
_lowercase =None
_lowercase =0
_lowercase =list(__snake_case )
hq.heapify(__snake_case )
while h:
_lowercase =hq.heappop(__snake_case )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
_lowercase =u
_lowercase =u.edges[v.id]
hq.heapify(__snake_case )
for i in range(1 , len(__snake_case ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def UpperCAmelCase_ ( ) -> None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 | 0 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_28,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 142 |
# flake8: noqa
# Lint as: python3
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
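# `__all__` above pins the public surface of this module: it is exactly what
# `from datasets.utils import *` re-exports.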
| 5 | 0 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features", [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
], )
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train" ), "train", "test"] )
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list] )
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True] )
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features", [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
], )
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train" ), "train", "test"] )
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected", [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
], )
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, num_buckets=320, max_bucket_distance=800, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
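        # With the default conv_stride of (5, 2, 2, 2, 2, 2, 2) this product is
        # 320, i.e. one encoder frame per 320 waveform samples (20 ms at 16 kHz).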
| 5 | 0 |
def min_path_sum(grid):
    """Return the smallest possible sum of a top-left to bottom-right path
    through `grid`, moving only right or down; costs are accumulated in place."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row, row_above):
    """Accumulate, in place, the cheapest cost of reaching each cell of
    `current_row` given the already-accumulated `row_above`."""
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
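# Worked example: for [[2, 1], [3, 1], [4, 2]] the accumulated grid becomes
# [[2, 3], [5, 4], [9, 6]], so min_path_sum returns 6 (the path 2 -> 1 -> 1 -> 2).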
| 120 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)
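        # shard() reshapes each array from (num_devices * batch, ...) to
        # (num_devices, batch, ...) so that the jit-compiled pipeline below runs
        # one slice per accelerator, mirroring the replicated params.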
        output = pipeline(
            prompt_ids, processed_masked_images, processed_masks, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 5 | 0 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer( Trainer ):
    '''simple docstring'''
    def __init__( self , config=None , data_args=None , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        if config is None:
            assert isinstance(self.model , PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f''' {self.model.__class__}'''
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
                """ padding..""" )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss
            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler( self , num_training_steps ):
        if self.optimizer is None:
            no_decay = ["""bias""", """LayerNorm.weight"""]
            optimizer_grouped_parameters = [
                {
                    """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    """weight_decay""": self.args.weight_decay,
                },
                {
                    """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    """weight_decay""": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"""scale_parameter""": False, """relative_step""": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    """betas""": (self.args.adam_beta1, self.args.adam_beta2),
                    """eps""": self.args.adam_epsilon,
                }
            optimizer_kwargs["""lr"""] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
        else:  # ignoring --lr_scheduler
            logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
    def _get_lr_scheduler( self , num_training_steps ):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
        return scheduler
    def _get_train_sampler( self ):
        if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset )
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
            return (
                RandomSampler(self.train_dataset )
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset )
            )
    def _compute_loss( self , model , inputs , labels ):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs , use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss , logits = model(**inputs , labels=labels , use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs , use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits , dim=-1 )
            loss , _ = self.loss_fn(lprobs , labels , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
        return loss, logits
    def compute_loss( self , model , inputs ):
        labels = inputs.pop("""labels""" )
        loss , _ = self._compute_loss(model , inputs , labels )
        return loss
    def prediction_step( self , model , inputs , prediction_loss_only , ignore_keys = None , ):
        inputs = self._prepare_inputs(inputs )
        gen_kwargs = {
            """max_length""": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **gen_kwargs , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens , gen_kwargs["""max_length"""] )
        labels = inputs.pop("""labels""" )
        with torch.no_grad():
            # compute loss on predict data
            loss , logits = self._compute_loss(model , inputs , labels )
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels , gen_kwargs["""max_length"""] )
        return (loss, logits, labels)
    def _pad_tensors_to_max_len( self , tensor , max_length ):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
                f''' padded to `max_length`={max_length}''' )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
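# Hypothetical wiring of the trainer above (the model/dataset objects and the
# names other than `config` and `data_args` are assumptions; the remaining
# keyword arguments follow the base `Trainer` API):
#
#   trainer = Seq2SeqTrainer(
#       config=model.config,
#       data_args=data_args,
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#   )
#   trainer.train()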
| 111 |
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCamelCase__ ( datasets.Metric):
    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence''' ),
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
'''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
] , )
    def _download_and_prepare(self , dl_manager ):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
    def _compute(self , sources , predictions , references , gpus=None , progress_bar=False ):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'''src''': sources, '''mt''': predictions, '''ref''': references}
        data = [dict(zip(data , t ) ) for t in zip(*data.values() )]
        scores , mean_score = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar )
        return {"mean_score": mean_score, "scores": scores}
| 5 | 0 |
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm( number : int ) -> int:
    """simple docstring"""
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator( number : int ) -> int:
    """simple docstring"""
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
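# Quick sanity check: 25 == 0b11001 has three set bits, so both implementations
# should agree.
# >>> get_set_bits_count_using_brian_kernighans_algorithm(25)
# 3
# >>> get_set_bits_count_using_modulo_operator(25)
# 3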
def benchmark( ) -> None:
    """simple docstring"""
    def do_benchmark( number : int ) -> None:
        setup = "import __main__ as z"
        print(f'''Benchmark when {number = }:''' )
        print(f'''{get_set_bits_count_using_modulo_operator(number ) = }''' )
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})" , setup=setup )
        print(f'''timeit() runs in {timing} seconds''' )
        print(f'''{get_set_bits_count_using_brian_kernighans_algorithm(number ) = }''' )
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})" , setup=setup , )
        print(f'''timeit() runs in {timing} seconds''' )
    for number in (25, 37, 58, 0):
        do_benchmark(number )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 95 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler( SchedulerMixin , ConfigMixin ):
    order = 1
    @register_to_config
    def __init__(self , num_train_timesteps=2_0_0_0 , beta_min=0.1 , beta_max=2_0 , sampling_eps=1e-3 ):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None
    def set_timesteps(self , num_inference_steps , device = None ):
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )
    def step_pred(self , score , t , x , generator=None ):
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean
    def __len__(self ):
        return self.config.num_train_timesteps
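# Illustrative sampling sketch (the score network below is an assumption, not
# part of this file): after `set_timesteps`, each `step_pred` call performs one
# Euler-Maruyama step of the reverse-time VP SDE.
#
#   scheduler = ScoreSdeVpScheduler()
#   scheduler.set_timesteps(num_inference_steps=1000, device="cpu")
#   sample = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       score = score_model(sample, t)  # hypothetical score model
#       sample, sample_mean = scheduler.step_pred(score, t, sample)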
| 5 | 0 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 322 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def get_mobilevit_config( mobilevit_name ):
    """simple docstring"""
    config = MobileViTConfig()
    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0
    if mobilevit_name.startswith('''deeplabv3_''' ):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = '''pascal-voc-id2label.json'''
    else:
        config.num_labels = 1000
        filename = '''imagenet-1k-id2label.json'''
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key( name , base_model=False ):
    """simple docstring"""
    for i in range(1 , 6 ):
        if F"layer_{i}." in name:
            name = name.replace(F"layer_{i}." , F"encoder.layer.{i - 1}." )
    if "conv_1." in name:
        name = name.replace('''conv_1.''' , '''conv_stem.''' )
    if ".block." in name:
        name = name.replace('''.block.''' , '''.''' )
    if "exp_1x1" in name:
        name = name.replace('''exp_1x1''' , '''expand_1x1''' )
    if "red_1x1" in name:
        name = name.replace('''red_1x1''' , '''reduce_1x1''' )
    if ".local_rep.conv_3x3." in name:
        name = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' )
    if ".local_rep.conv_1x1." in name:
        name = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' )
    if ".norm." in name:
        name = name.replace('''.norm.''' , '''.normalization.''' )
    if ".conv." in name:
        name = name.replace('''.conv.''' , '''.convolution.''' )
    if ".conv_proj." in name:
        name = name.replace('''.conv_proj.''' , '''.conv_projection.''' )
    for i in range(0 , 2 ):
        for j in range(0 , 4 ):
            if F".{i}.{j}." in name:
                name = name.replace(F".{i}.{j}." , F".{i}.layer.{j}." )
    for i in range(2 , 6 ):
        for j in range(0 , 4 ):
            if F".{i}.{j}." in name:
                name = name.replace(F".{i}.{j}." , F".{i}." )
                if "expand_1x1" in name:
                    name = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' )
                if "conv_3x3" in name:
                    name = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' )
                if "reduce_1x1" in name:
                    name = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' )
    for i in range(2 , 5 ):
        if F".global_rep.{i}.weight" in name:
            name = name.replace(F".global_rep.{i}.weight" , '''.layernorm.weight''' )
        if F".global_rep.{i}.bias" in name:
            name = name.replace(F".global_rep.{i}.bias" , '''.layernorm.bias''' )
    if ".global_rep." in name:
        name = name.replace('''.global_rep.''' , '''.transformer.''' )
    if ".pre_norm_mha.0." in name:
        name = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' )
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' )
    if ".pre_norm_ffn.0." in name:
        name = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' )
    if ".pre_norm_ffn.1." in name:
        name = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' )
    if ".pre_norm_ffn.4." in name:
        name = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' )
    if ".transformer." in name:
        name = name.replace('''.transformer.''' , '''.transformer.layer.''' )
    if ".aspp_layer." in name:
        name = name.replace('''.aspp_layer.''' , '''.''' )
    if ".aspp_pool." in name:
        name = name.replace('''.aspp_pool.''' , '''.''' )
    if "seg_head." in name:
        name = name.replace('''seg_head.''' , '''segmentation_head.''' )
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' )
    if "classifier.fc." in name:
        name = name.replace('''classifier.fc.''' , '''classifier.''' )
    elif (not base_model) and ("segmentation_head." not in name):
        name = '''mobilevit.''' + name
    return name
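# Worked example (traced by hand through the rules above, assuming base_model=False):
#   rename_key("conv_1.block.conv.weight")
#   -> "conv_stem.block.conv.weight"       # "conv_1." rule
#   -> "conv_stem.conv.weight"             # ".block." rule
#   -> "conv_stem.convolution.weight"      # ".conv." rule
#   -> "mobilevit.conv_stem.convolution.weight"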
def convert_state_dict( orig_state_dict , model , base_model=False ):
    """simple docstring"""
    if base_model:
        model_prefix = ''''''
    else:
        model_prefix = '''mobilevit.'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if key[:8] == "encoder.":
            key = key[8:]
        if "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[0][6:] ) - 1
            transformer_num = int(key_split[3] )
            layer = model.get_submodule(F"{model_prefix}encoder.layer.{layer_num}" )
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                F"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , base_model )] = val
    return orig_state_dict
def prepare_img( ):
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint( mobilevit_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    """simple docstring"""
    config = get_mobilevit_config(mobilevit_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )
    # load 🤗 model
    if mobilevit_name.startswith('''deeplabv3_''' ):
        model = MobileViTForSemanticSegmentation(config ).eval()
    else:
        model = MobileViTForImageClassification(config ).eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    outputs = model(**encoding )
    logits = outputs.logits
    if mobilevit_name.startswith('''deeplabv3_''' ):
        assert logits.shape == (1, 21, 32, 32)
        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
                    [[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
                    [[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
                    [[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
                    [[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
                    [[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
                    [[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
                ] )
        else:
            raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
        assert torch.allclose(logits[0, :3, :3, :3] , expected_logits , atol=1e-4 )
    else:
        assert logits.shape == (1, 1000)
        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.98_66, 0.23_92, -1.12_41] )
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.47_61, -0.93_99, -1.95_87] )
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.93_64, -1.23_27, -0.46_53] )
        else:
            raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"Saving model {mobilevit_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            '''mobilevit_s''': '''mobilevit-small''',
            '''mobilevit_xs''': '''mobilevit-x-small''',
            '''mobilevit_xxs''': '''mobilevit-xx-small''',
            '''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
            '''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
            '''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
        }
        print('''Pushing to the hub...''' )
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name , organization='''apple''' )
        model.push_to_hub(model_name , organization='''apple''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 5 | 0 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class A__(a_ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=False , _lowercase=True , _lowercase=99 , _lowercase=32 , _lowercase=5 , _lowercase=4 , _lowercase=64 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=16 , _lowercase=2 , _lowercase=0.0_2 , _lowercase=3 , _lowercase=4 , _lowercase=None , _lowercase=2 , _lowercase=2 , _lowercase=2 , _lowercase=2 , _lowercase=4 , _lowercase=1 , ) -> int:
a_ : Tuple = parent
a_ : Dict = batch_size
a_ : int = seq_length
a_ : Union[str, Any] = is_training
a_ : int = use_input_mask
a_ : Optional[Any] = use_token_type_ids
a_ : List[str] = use_labels
a_ : Dict = vocab_size
a_ : Optional[int] = hidden_size
a_ : List[str] = num_hidden_layers
a_ : Tuple = num_attention_heads
a_ : Optional[int] = intermediate_size
a_ : Optional[int] = hidden_act
a_ : Dict = hidden_dropout_prob
a_ : Any = attention_probs_dropout_prob
a_ : Optional[Any] = max_position_embeddings
a_ : Dict = type_vocab_size
a_ : Dict = type_sequence_label_size
a_ : Dict = initializer_range
a_ : Dict = num_labels
a_ : Dict = num_choices
a_ : Dict = scope
a_ : Optional[int] = q_groups
a_ : Optional[Any] = k_groups
a_ : Optional[int] = v_groups
a_ : List[str] = post_attention_groups
a_ : Tuple = intermediate_groups
a_ : Tuple = output_groups
def UpperCamelCase__ ( self ) -> Optional[Any]:
a_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ : Optional[Any] = None
if self.use_input_mask:
a_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
a_ : List[Any] = None
a_ : Optional[Any] = None
a_ : Union[str, Any] = None
if self.use_labels:
a_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a_ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
a_ : List[str] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ) -> Optional[Any]:
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Any:
a_ : List[str] = SqueezeBertModel(config=_lowercase )
model.to(_lowercase )
model.eval()
a_ : Optional[Any] = model(_lowercase , _lowercase )
a_ : str = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Tuple:
a_ : List[str] = SqueezeBertForMaskedLM(config=_lowercase )
model.to(_lowercase )
model.eval()
a_ : Any = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Tuple:
a_ : str = SqueezeBertForQuestionAnswering(config=_lowercase )
model.to(_lowercase )
model.eval()
a_ : Optional[Any] = model(
_lowercase , attention_mask=_lowercase , start_positions=_lowercase , end_positions=_lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Dict:
a_ : Tuple = self.num_labels
a_ : Union[str, Any] = SqueezeBertForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
a_ : Optional[Any] = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Tuple:
a_ : Any = self.num_labels
a_ : Optional[int] = SqueezeBertForTokenClassification(config=_lowercase )
model.to(_lowercase )
model.eval()
a_ : Dict = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Optional[int]:
a_ : Optional[Any] = self.num_choices
a_ : Optional[Any] = SqueezeBertForMultipleChoice(config=_lowercase )
model.to(_lowercase )
model.eval()
a_ : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a_ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a_ : str = model(
_lowercase , attention_mask=_lowercase , labels=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase__ ( self ) -> str:
a_ : List[Any] = self.prepare_config_and_inputs()
((a_) , (a_) , (a_) , (a_) , (a_) , (a_)) : str = config_and_inputs
a_ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class A__(a_, a_, unittest.TestCase ):
"""simple docstring"""
_A : int = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
_A : Any = (
{
'''feature-extraction''': SqueezeBertModel,
'''fill-mask''': SqueezeBertForMaskedLM,
'''question-answering''': SqueezeBertForQuestionAnswering,
'''text-classification''': SqueezeBertForSequenceClassification,
'''token-classification''': SqueezeBertForTokenClassification,
'''zero-shot''': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_A : int = False
_A : Tuple = True
_A : int = False
def UpperCamelCase__ ( self ) -> List[Any]:
a_ : Dict = SqueezeBertModelTester(self )
a_ : Dict = ConfigTester(self , config_class=_lowercase , dim=37 )
def UpperCamelCase__ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) -> List[str]:
a_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_lowercase )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
a_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_lowercase )
def UpperCamelCase__ ( self ) -> Tuple:
a_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_lowercase )
def UpperCamelCase__ ( self ) -> Optional[int]:
a_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_lowercase )
def UpperCamelCase__ ( self ) -> List[str]:
a_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_lowercase )
def UpperCamelCase__ ( self ) -> List[Any]:
a_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_lowercase )
@slow
def UpperCamelCase__ ( self ) -> Union[str, Any]:
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : Any = SqueezeBertModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
@require_sentencepiece
@require_tokenizers
@require_torch
class A__(unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self ) -> Dict:
a_ : Optional[int] = SqueezeBertForSequenceClassification.from_pretrained("""squeezebert/squeezebert-mnli""" )
a_ : Optional[int] = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]] )
a_ : Dict = model(_lowercase )[0]
a_ : Optional[Any] = torch.Size((1, 3) )
self.assertEqual(output.shape , _lowercase )
a_ : List[str] = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]] )
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1e-4 ) )
| 248 |
import requests
from bs4 import BeautifulSoup
def world_covidaa_stats( url = "https://www.worldometers.info/coronavirus" ) -> dict:
    """simple docstring"""
    soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
    keys = soup.findAll('''h1''' )
    values = soup.findAll('''div''' , {'''class''': '''maincounter-number'''} )
    keys += soup.findAll('''span''' , {'''class''': '''panel-title'''} )
    values += soup.findAll('''div''' , {'''class''': '''number-table-main'''} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
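# Illustrative shape of the result (keys and values depend on the live page and
# change constantly -- this is an assumption, not captured output):
#   {'Coronavirus Cases:': '...', 'Deaths:': '...', 'Recovered:': '...'}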
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 5 | 0 |
from __future__ import annotations
def depth_first_search( possible_board : list[int] , diagonal_right_collisions : list[int] , diagonal_left_collisions : list[int] , boards : list[list[str]] , n : int , ):
    """simple docstring"""
    row = len(possible_board )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
def n_queens_solution( n : int ):
    """simple docstring"""
    boards : list[list[str]] = []
    depth_first_search([] , [] , [] , boards , n )
    # Print all the boards
    for board in boards:
        for column in board:
            print(column )
        print("" )
    print(len(boards ) , "solutions were found." )
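# Illustrative run: the 4x4 board has exactly two solutions, so
# n_queens_solution(4) prints both boards followed by
# "2 solutions were found."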
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 184 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 | 0 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="resnet50" , _lowerCamelCase=3 , _lowerCamelCase=32 , _lowerCamelCase=3 , _lowerCamelCase=True , _lowerCamelCase=True , ) ->str:
SCREAMING_SNAKE_CASE : List[str] = parent
SCREAMING_SNAKE_CASE : Optional[Any] = out_indices if out_indices is not None else [4]
SCREAMING_SNAKE_CASE : int = stage_names
SCREAMING_SNAKE_CASE : List[str] = out_features
SCREAMING_SNAKE_CASE : str = backbone
SCREAMING_SNAKE_CASE : List[Any] = batch_size
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = use_pretrained_backbone
SCREAMING_SNAKE_CASE : List[str] = is_training
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Dict = self.get_config()
return config, pixel_values
def __lowerCAmelCase ( self ) ->Optional[Any]:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->int:
SCREAMING_SNAKE_CASE : Optional[int] = TimmBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(_lowerCamelCase )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class a_ ( a__ , a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = (TimmBackbone,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : str = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
__SCREAMING_SNAKE_CASE : str = False
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : int = False
__SCREAMING_SNAKE_CASE : str = False
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Any = TimmBackboneModelTester(self )
SCREAMING_SNAKE_CASE : Union[str, Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Tuple:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : int = '''resnet18'''
SCREAMING_SNAKE_CASE : Optional[int] = '''microsoft/resnet-18'''
SCREAMING_SNAKE_CASE : List[Any] = AutoBackbone.from_pretrained(_lowerCamelCase , use_timm_backbone=_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = AutoBackbone.from_pretrained(_lowerCamelCase )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
SCREAMING_SNAKE_CASE : Dict = AutoBackbone.from_pretrained(_lowerCamelCase , use_timm_backbone=_lowerCamelCase , out_indices=[1, 2, 3] )
SCREAMING_SNAKE_CASE : List[Any] = AutoBackbone.from_pretrained(_lowerCamelCase , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def __lowerCAmelCase ( self ) ->Optional[Any]:
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def __lowerCAmelCase ( self ) ->Tuple:
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def __lowerCAmelCase ( self ) ->List[Any]:
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def __lowerCAmelCase ( self ) ->Tuple:
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def __lowerCAmelCase ( self ) ->Any:
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def __lowerCAmelCase ( self ) ->Any:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __lowerCAmelCase ( self ) ->Optional[Any]:
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def __lowerCAmelCase ( self ) ->Tuple:
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def __lowerCAmelCase ( self ) ->int:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __lowerCAmelCase ( self ) ->Tuple:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __lowerCAmelCase ( self ) ->Tuple:
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def __lowerCAmelCase ( self ) ->int:
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def __lowerCAmelCase ( self ) ->List[str]:
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def __lowerCAmelCase ( self ) ->Dict:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCAmelCase ( self ) ->List[Any]:
pass
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : List[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Tuple = self.has_attentions
# no need to test all models as different heads yield the same functionality
SCREAMING_SNAKE_CASE : List[str] = self.all_model_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = model(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = outputs[0][-1]
# Encoder-/Decoder-only models
SCREAMING_SNAKE_CASE : Any = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
SCREAMING_SNAKE_CASE : List[str] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=_lowerCamelCase )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(**_lowerCamelCase )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
SCREAMING_SNAKE_CASE : str = copy.deepcopy(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : int = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(**_lowerCamelCase )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
SCREAMING_SNAKE_CASE : Tuple = copy.deepcopy(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : List[Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(**_lowerCamelCase )
| 313 |
def UpperCAmelCase_ ( input_string : str ) -> str:
    """simple docstring"""
    max_length = 0
    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ''''''
    output_string = ''''''
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string ) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l , r = 0, 0
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string ) )]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string ) ):
        k = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
        while (
            j - k >= 0
            and j + k < len(new_input_string )
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
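# Illustrative usage (traced by hand): the longest palindromic substring of
# "cbbd" is the even-length "bb".
# >>> UpperCAmelCase_("cbbd")
# 'bb'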
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 | 0 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
snake_case_ : Optional[int] = logging.get_logger(__name__)
class __snake_case ( a ):
UpperCAmelCase__ : Union[str, Any] = '''AutoTokenizer'''
UpperCAmelCase__ : str = ['''tokenizer''']
UpperCAmelCase__ : int = {
'''semantic_prompt''': 1,
'''coarse_prompt''': 2,
'''fine_prompt''': 2,
}
def __init__( self : Union[str, Any] , _snake_case : List[str] , _snake_case : List[str]=None):
"""simple docstring"""
super().__init__(_snake_case)
UpperCAmelCase_ = speaker_embeddings
@classmethod
def lowerCamelCase ( cls : Optional[Any] , _snake_case : Any , _snake_case : Tuple="speaker_embeddings_path.json" , **_snake_case : int):
"""simple docstring"""
if speaker_embeddings_dict_path is not None:
UpperCAmelCase_ = get_file_from_repo(
_snake_case , _snake_case , subfolder=kwargs.pop('''subfolder''' , _snake_case) , cache_dir=kwargs.pop('''cache_dir''' , _snake_case) , force_download=kwargs.pop('''force_download''' , _snake_case) , proxies=kwargs.pop('''proxies''' , _snake_case) , resume_download=kwargs.pop('''resume_download''' , _snake_case) , local_files_only=kwargs.pop('''local_files_only''' , _snake_case) , use_auth_token=kwargs.pop('''use_auth_token''' , _snake_case) , revision=kwargs.pop('''revision''' , _snake_case) , )
if speaker_embeddings_path is None:
logger.warning(
F"""`{os.path.join(_snake_case , _snake_case)}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""")
UpperCAmelCase_ = None
else:
with open(_snake_case) as speaker_embeddings_json:
UpperCAmelCase_ = json.load(_snake_case)
else:
UpperCAmelCase_ = None
UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case , **_snake_case)
return cls(tokenizer=_snake_case , speaker_embeddings=_snake_case)
def lowerCamelCase ( self : int , _snake_case : int , _snake_case : Tuple="speaker_embeddings_path.json" , _snake_case : Any="speaker_embeddings" , _snake_case : List[Any] = False , **_snake_case : Optional[int] , ):
"""simple docstring"""
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_snake_case , _snake_case , '''v2''') , exist_ok=_snake_case)
UpperCAmelCase_ = {}
UpperCAmelCase_ = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
UpperCAmelCase_ = self._load_voice_preset(_snake_case)
UpperCAmelCase_ = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['''repo_or_path'''] , _snake_case , F"""{prompt_key}_{key}""") , voice_preset[key] , allow_pickle=_snake_case , )
UpperCAmelCase_ = os.path.join(_snake_case , F"""{prompt_key}_{key}.npy""")
UpperCAmelCase_ = tmp_dict
with open(os.path.join(_snake_case , _snake_case) , '''w''') as fp:
json.dump(_snake_case , _snake_case)
super().save_pretrained(_snake_case , _snake_case , **_snake_case)
def lowerCamelCase ( self : List[Any] , _snake_case : Tuple = None , **_snake_case : str):
"""simple docstring"""
UpperCAmelCase_ = self.speaker_embeddings[voice_preset]
UpperCAmelCase_ = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""")
UpperCAmelCase_ = get_file_from_repo(
self.speaker_embeddings.get('''repo_or_path''' , '''/''') , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , _snake_case) , cache_dir=kwargs.pop('''cache_dir''' , _snake_case) , force_download=kwargs.pop('''force_download''' , _snake_case) , proxies=kwargs.pop('''proxies''' , _snake_case) , resume_download=kwargs.pop('''resume_download''' , _snake_case) , local_files_only=kwargs.pop('''local_files_only''' , _snake_case) , use_auth_token=kwargs.pop('''use_auth_token''' , _snake_case) , revision=kwargs.pop('''revision''' , _snake_case) , )
if path is None:
raise ValueError(
F"""`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/") , voice_preset_paths[key])}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.""")
UpperCAmelCase_ = np.load(_snake_case)
return voice_preset_dict
def lowerCamelCase ( self : str , _snake_case : List[Any] = None):
"""simple docstring"""
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"""Voice preset unrecognized, missing {key} as a key.""")
if not isinstance(voice_preset[key] , np.ndarray):
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.""")
if len(voice_preset[key].shape) != self.preset_shape[key]:
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.""")
def __call__( self : Any , _snake_case : str=None , _snake_case : Optional[int]=None , _snake_case : Tuple="pt" , _snake_case : Any=256 , _snake_case : str=False , _snake_case : Optional[int]=True , _snake_case : List[str]=False , **_snake_case : List[Any] , ):
"""simple docstring"""
if voice_preset is not None and not isinstance(_snake_case , _snake_case):
if (
isinstance(_snake_case , _snake_case)
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
UpperCAmelCase_ = self._load_voice_preset(_snake_case)
else:
if isinstance(_snake_case , _snake_case) and not voice_preset.endswith('''.npz'''):
UpperCAmelCase_ = voice_preset + '''.npz'''
UpperCAmelCase_ = np.load(_snake_case)
if voice_preset is not None:
self._validate_voice_preset_dict(_snake_case , **_snake_case)
UpperCAmelCase_ = BatchFeature(data=_snake_case , tensor_type=_snake_case)
UpperCAmelCase_ = self.tokenizer(
_snake_case , return_tensors=_snake_case , padding='''max_length''' , max_length=_snake_case , return_attention_mask=_snake_case , return_token_type_ids=_snake_case , add_special_tokens=_snake_case , **_snake_case , )
if voice_preset is not None:
UpperCAmelCase_ = voice_preset
return encoded_text
| 51 |
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Returns the prime numbers below max_number, via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Counts the semiprimes below max_number with a two-pointer scan over the primes."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
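
# Sanity-check sketch: the semiprimes below 30 are 4, 6, 9, 10, 14, 15, 21, 22,
# 25 and 26, so solution(30) should return 10.
if __name__ == "__main__":
    assert solution(30) == 10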
if __name__ == "__main__":
print(f'''{solution() = }''')
| 5 | 0 |
def solution(n: int = 4_000_000) -> int:
    """Returns the sum of the even Fibonacci numbers that do not exceed n."""
    even_fibs: list[int] = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
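
# Worked example: the even Fibonacci numbers not exceeding 100 are 2, 8 and 34.
if __name__ == "__main__":
    assert solution(100) == 44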
if __name__ == "__main__":
print(F"""{solution() = }""")
| 73 |
# fmt: off
MORSE_CODE_DICT = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """Converts a plain-text message to Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Converts a Morse-code message back to plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
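
# Example round trip (a quick check): "SOS" encrypts to "... --- ..." and
# decrypts back to "SOS".
if __name__ == "__main__":
    assert encrypt("SOS") == "... --- ..."
    assert decrypt("... --- ...") == "SOS"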
if __name__ == "__main__":
main()
| 5 | 0 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Exports a PyTorch BertModel's weights as a TensorFlow 1.x checkpoint."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
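
# Example of the name mapping above (illustrative): the PyTorch parameter
# "encoder.layer.0.attention.self.query.weight" becomes the TF variable
# "bert/encoder/layer_0/attention/self/query/kernel", with the matrix transposed
# because "attention.self.query" appears in `tensors_to_transpose`.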
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
| 142 |
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Runs the Viterbi algorithm and returns the most likely sequence of hidden states."""
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result
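
# A usage sketch with the classic "Healthy/Fever" HMM (values illustrative):
#
#   viterbi(
#       ["normal", "cold", "dizzy"],
#       ["Healthy", "Fever"],
#       {"Healthy": 0.6, "Fever": 0.4},
#       {"Healthy": {"Healthy": 0.7, "Fever": 0.3}, "Fever": {"Healthy": 0.4, "Fever": 0.6}},
#       {"Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}},
#   )
#   # -> ["Healthy", "Healthy", "Fever"]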
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validates all Viterbi inputs."""
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Ensures no parameter is empty."""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    """Validates the list parameters."""
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    """Validates that an object is a list of strings."""
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validates the dict parameters."""
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    """Validates a nested dict of floats."""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    """Validates that an object is a dict with string keys and values of `value_type`."""
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 5 | 0 |
"""Project Euler problem 42: counts the triangle words in words.txt."""
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    """Counts the words in words.txt whose letter values sum to a triangular number."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
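
# Worked example: "SKY" has word value 19 + 11 + 25 = 55, the tenth triangular
# number, so it counts as a triangle word.
if __name__ == "__main__":
    assert sum(ord(x) - 64 for x in "SKY") == 55
    assert 55 in TRIANGULAR_NUMBERS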
if __name__ == "__main__":
    print(solution())

| 145 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list() -> tuple:
    """Returns the default ESM-2 vocabulary as a tuple of tokens."""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
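
# A minimal usage sketch: a folding-model config falls back to a default
# EsmFoldConfig and the ESM-2 vocabulary when none are supplied.
if __name__ == "__main__":
    config = EsmConfig(vocab_size=33, is_folding_model=True)
    assert isinstance(config.esmfold_config, EsmFoldConfig)
    assert config.vocab_list == get_default_vocab_list()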
| 5 | 0 |
"""A toy implementation of simplified DES (S-DES) for teaching purposes."""
def apply_table(inp: str, table: list[int]) -> str:
    """Permutes the bits of `inp` according to `table` (1-indexed)."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data: str) -> str:
    """Circular left shift by one position."""
    return data[1:] + data[0]


def xor(a: str, b: str) -> str:
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s: list[list[int]], data: str) -> str:
    """Looks up an S-box entry: the outer bits select the row, the inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion: list[int], s0: list[list[int]], s1: list[list[int]], key: str, message: str) -> str:
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 120 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenizes and preprocesses the dataset; multiple copies of the same prompt are sent sequentially."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Removes the last block of the generated code containing one of the EOF_STRINGS markers."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generates multiple completions for each task in the dataset, gathered across processes."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 5 | 0 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFRoFormerModel,
"""fill-mask""": TFRoFormerForMaskedLM,
"""question-answering""": TFRoFormerForQuestionAnswering,
"""text-classification""": TFRoFormerForSequenceClassification,
"""text-generation""": TFRoFormerForCausalLM,
"""token-classification""": TFRoFormerForTokenClassification,
"""zero-shot""": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50_000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emba(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)
    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emba([2, 16, 512])
        weights = emba.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        expected_query_layer = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
        expected_key_layer = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key_layer, atol=self.tolerance)
| 111 |
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1
def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Ideal gas law: returns the pressure P = nRT / V."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Ideal gas law: returns the volume V = nRT / P."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
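
# Worked example: pressure_of_gas_system(2, 100, 5) == 2 * 100 * 8.314462 / 5
# == 332.57848.
if __name__ == "__main__":
    assert pressure_of_gas_system(2, 100, 5) == 2 * 100 * 8.314462 / 5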
if __name__ == "__main__":
from doctest import testmod
testmod()
| 5 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
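
# A minimal usage sketch: the defaults mirror google/vivit-b-16x2-kinetics400,
# i.e. 32 frames of 224x224 pixels split into 2x16x16 tubelets.
if __name__ == "__main__":
    config = VivitConfig()
    assert (config.num_frames, config.image_size, config.tubelet_size) == (32, 224, [2, 16, 16])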
| 95 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solves matrix * x = vector by Gaussian elimination with partial pivoting."""
    size = len(matrix)
    augmented = [[0 for _ in range(size + 1)] for _ in range(size)]

    # copy matrix and vector into the augmented matrix
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Returns the minimal-degree polynomial passing through (1, y1), (2, y2), ..."""
    size = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
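
# Example: interpolate([1, 8, 27]) fits 6*n**2 - 11*n + 6 through u(1..3) for
# u(n) = n**3; its value at n = 4 is 58, the "first incorrect term" of that fit.
if __name__ == "__main__":
    assert interpolate([1, 8, 27])(4) == 58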
def question_function(variable: int) -> int:
    """Generating function u(n) from Project Euler problem 101."""
    return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sums the first incorrect terms (FITs) of the optimum polynomials for `func`."""
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
| 5 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2_0_4_8,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
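
        # Example of the alignment above: spm gives "," the id 3; adding
        # `fairseq_offset` (= 1) recovers the fairseq id 4 shown in the table.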
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 322 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
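
# With the lazy module installed in sys.modules, an import such as
# `from transformers.models.xlm import XLMTokenizer` defers the heavy
# torch/tensorflow imports until the attribute is first accessed.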
| 5 | 0 |
def solution(min_total: int = 10**12) -> int:
    """
    Returns the number of blue discs in the first arrangement with more than
    `min_total` discs for which the probability of drawing two blue discs is 1/2.
    """
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2
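
# Worked example: solution(21) == 85 -- the first box with more than 21 discs
# satisfying P(two blue) == 1/2 holds 85 blue discs out of 120 total.
if __name__ == "__main__":
    assert solution(21) == 85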
if __name__ == "__main__":
print(F"""{solution() = }""")
| 248 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
'''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientNetForImageClassification''',
'''EfficientNetModel''',
'''EfficientNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 5 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    # We also rename the 'label' column to 'labels', which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt"
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """simple docstring"""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs)
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be run before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
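
# Conceptual sketch of the LocalSGD step itself (assumption: a simplified toy,
# not the real accelerate.local_sgd.LocalSGD, which also handles the disabled
# case and gradient synchronization): each worker trains locally, and the
# parameters are averaged across ranks every `steps` optimizer steps.
import torch.distributed as dist


class NaiveLocalSGD:
    def __init__(self, model, steps):
        self.model, self.steps, self.counter = model, steps, 0

    def step(self):
        self.counter += 1
        if self.counter % self.steps == 0 and dist.is_initialized():
            for p in self.model.parameters():
                # average the parameter values across all workers
                dist.all_reduce(p.data, op=dist.ReduceOp.SUM)
                p.data /= dist.get_world_size()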
| 184 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 5 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
a__ : int = logging.get_logger(__name__)
# General docstring
a__ : Union[str, Any] = '''ResNetConfig'''
# Base docstring
a__ : Tuple = '''microsoft/resnet-50'''
a__ : str = [1, 2_048, 7, 7]
# Image classification docstring
a__ : Optional[int] = '''microsoft/resnet-50'''
a__ : Optional[int] = '''tiger cat'''
a__ : List[Any] = [
'''microsoft/resnet-50''',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class a_ ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 3 , _lowerCamelCase = 1 , _lowerCamelCase = "relu" ) ->Any:
super().__init__()
SCREAMING_SNAKE_CASE : int = nn.Convad(
_lowerCamelCase , _lowerCamelCase , kernel_size=_lowerCamelCase , stride=_lowerCamelCase , padding=kernel_size // 2 , bias=_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = nn.BatchNormad(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity()
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Tensor:
SCREAMING_SNAKE_CASE : int = self.convolution(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = self.normalization(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = self.activation(_lowerCamelCase )
return hidden_state
class a_ ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->str:
super().__init__()
SCREAMING_SNAKE_CASE : Union[str, Any] = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
SCREAMING_SNAKE_CASE : str = config.num_channels
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Tensor:
SCREAMING_SNAKE_CASE : List[Any] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
SCREAMING_SNAKE_CASE : Any = self.embedder(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = self.pooler(_lowerCamelCase )
return embedding
class a_ ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 2 ) ->Dict:
super().__init__()
SCREAMING_SNAKE_CASE : List[str] = nn.Convad(_lowerCamelCase , _lowerCamelCase , kernel_size=1 , stride=_lowerCamelCase , bias=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = nn.BatchNormad(_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Tensor:
SCREAMING_SNAKE_CASE : List[Any] = self.convolution(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = self.normalization(_lowerCamelCase )
return hidden_state
class a_ ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 1 , _lowerCamelCase = "relu" ) ->Optional[Any]:
super().__init__()
SCREAMING_SNAKE_CASE : Any = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE : Optional[Any] = (
ResNetShortCut(_lowerCamelCase , _lowerCamelCase , stride=_lowerCamelCase ) if should_apply_shortcut else nn.Identity()
)
SCREAMING_SNAKE_CASE : Optional[int] = nn.Sequential(
ResNetConvLayer(_lowerCamelCase , _lowerCamelCase , stride=_lowerCamelCase ) , ResNetConvLayer(_lowerCamelCase , _lowerCamelCase , activation=_lowerCamelCase ) , )
SCREAMING_SNAKE_CASE : Any = ACTaFN[activation]
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[str]:
SCREAMING_SNAKE_CASE : str = hidden_state
SCREAMING_SNAKE_CASE : int = self.layer(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = self.shortcut(_lowerCamelCase )
hidden_state += residual
SCREAMING_SNAKE_CASE : Dict = self.activation(_lowerCamelCase )
return hidden_state
class a_ ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 1 , _lowerCamelCase = "relu" , _lowerCamelCase = 4 ) ->Any:
super().__init__()
SCREAMING_SNAKE_CASE : Tuple = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE : List[Any] = out_channels // reduction
SCREAMING_SNAKE_CASE : List[Any] = (
ResNetShortCut(_lowerCamelCase , _lowerCamelCase , stride=_lowerCamelCase ) if should_apply_shortcut else nn.Identity()
)
SCREAMING_SNAKE_CASE : Any = nn.Sequential(
ResNetConvLayer(_lowerCamelCase , _lowerCamelCase , kernel_size=1 ) , ResNetConvLayer(_lowerCamelCase , _lowerCamelCase , stride=_lowerCamelCase ) , ResNetConvLayer(_lowerCamelCase , _lowerCamelCase , kernel_size=1 , activation=_lowerCamelCase ) , )
SCREAMING_SNAKE_CASE : Optional[int] = ACTaFN[activation]
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_state
SCREAMING_SNAKE_CASE : List[Any] = self.layer(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = self.shortcut(_lowerCamelCase )
hidden_state += residual
SCREAMING_SNAKE_CASE : List[Any] = self.activation(_lowerCamelCase )
return hidden_state
class a_ ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 2 , _lowerCamelCase = 2 , ) ->List[Any]:
super().__init__()
SCREAMING_SNAKE_CASE : List[str] = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(_lowerCamelCase , _lowerCamelCase , stride=_lowerCamelCase , activation=config.hidden_act ) , *[layer(_lowerCamelCase , _lowerCamelCase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Tensor:
SCREAMING_SNAKE_CASE : Any = input
for layer in self.layers:
SCREAMING_SNAKE_CASE : Optional[Any] = layer(_lowerCamelCase )
return hidden_state
class a_ ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->List[str]:
super().__init__()
SCREAMING_SNAKE_CASE : List[str] = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
_lowerCamelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
SCREAMING_SNAKE_CASE : Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_lowerCamelCase , config.depths[1:] ):
self.stages.append(ResNetStage(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , depth=_lowerCamelCase ) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = False , _lowerCamelCase = True ) ->BaseModelOutputWithNoAttention:
SCREAMING_SNAKE_CASE : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
SCREAMING_SNAKE_CASE : List[str] = hidden_states + (hidden_state,)
SCREAMING_SNAKE_CASE : List[Any] = stage_module(_lowerCamelCase )
if output_hidden_states:
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=_lowerCamelCase , hidden_states=_lowerCamelCase , )
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = ResNetConfig
__SCREAMING_SNAKE_CASE : Dict = 'resnet'
__SCREAMING_SNAKE_CASE : int = 'pixel_values'
__SCREAMING_SNAKE_CASE : Dict = True
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]:
if isinstance(_lowerCamelCase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(_lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=False ) ->Dict:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : Optional[int] = value
a__ : List[str] = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
a__ : Any = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'The bare ResNet model outputting raw features without any specific head on top.' , a__ , )
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->str:
super().__init__(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = config
SCREAMING_SNAKE_CASE : int = ResNetEmbeddings(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = ResNetEncoder(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_lowerCamelCase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None ) ->BaseModelOutputWithPoolingAndNoAttention:
SCREAMING_SNAKE_CASE : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE : int = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE : Union[str, Any] = self.embedder(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = self.encoder(
_lowerCamelCase , output_hidden_states=_lowerCamelCase , return_dict=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_outputs[0]
SCREAMING_SNAKE_CASE : Tuple = self.pooler(_lowerCamelCase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_lowerCamelCase , pooler_output=_lowerCamelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , a__ , )
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->int:
super().__init__(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = config.num_labels
SCREAMING_SNAKE_CASE : Optional[Any] = ResNetModel(_lowerCamelCase )
# classification head
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_lowerCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __lowerCAmelCase ( self , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , ) ->ImageClassifierOutputWithNoAttention:
SCREAMING_SNAKE_CASE : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE : Tuple = self.resnet(_lowerCamelCase , output_hidden_states=_lowerCamelCase , return_dict=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = outputs.pooler_output if return_dict else outputs[1]
SCREAMING_SNAKE_CASE : List[str] = self.classifier(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
SCREAMING_SNAKE_CASE : Optional[Any] = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
SCREAMING_SNAKE_CASE : Any = '''single_label_classification'''
else:
SCREAMING_SNAKE_CASE : Optional[Any] = '''multi_label_classification'''
if self.config.problem_type == "regression":
SCREAMING_SNAKE_CASE : Any = MSELoss()
if self.num_labels == 1:
SCREAMING_SNAKE_CASE : List[str] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = loss_fct(_lowerCamelCase , _lowerCamelCase )
elif self.config.problem_type == "single_label_classification":
SCREAMING_SNAKE_CASE : str = CrossEntropyLoss()
SCREAMING_SNAKE_CASE : int = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
SCREAMING_SNAKE_CASE : Optional[Any] = BCEWithLogitsLoss()
SCREAMING_SNAKE_CASE : Union[str, Any] = loss_fct(_lowerCamelCase , _lowerCamelCase )
if not return_dict:
SCREAMING_SNAKE_CASE : Dict = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_lowerCamelCase , logits=_lowerCamelCase , hidden_states=outputs.hidden_states )
@add_start_docstrings(
'\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n ' , a__ , )
class a_ ( a__ , a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->Tuple:
super().__init__(_lowerCamelCase )
super()._init_backbone(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = [config.embedding_size] + config.hidden_sizes
SCREAMING_SNAKE_CASE : Optional[Any] = ResNetEmbeddings(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = ResNetEncoder(_lowerCamelCase )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_lowerCamelCase )
@replace_return_docstrings(output_type=_lowerCamelCase , config_class=_CONFIG_FOR_DOC )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None ) ->BackboneOutput:
SCREAMING_SNAKE_CASE : Dict = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE : Union[str, Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE : List[str] = self.embedder(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = self.encoder(_lowerCamelCase , output_hidden_states=_lowerCamelCase , return_dict=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = outputs.hidden_states
SCREAMING_SNAKE_CASE : int = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
SCREAMING_SNAKE_CASE : Optional[Any] = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=_lowerCamelCase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=_lowerCamelCase , )
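
# Usage sketch for the classification model above (assumptions: the released
# transformers package is installed, the Hub is reachable, and "cat.png" is a
# hypothetical local image):
if __name__ == "__main__":
    from PIL import Image
    from transformers import AutoImageProcessor, ResNetForImageClassification

    processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
    model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
    inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[logits.argmax(-1).item()])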
| 313 |
def actual_power(a: int, b: int) -> int:
    """simple docstring"""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """simple docstring"""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
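
# Note: the even branch of actual_power above calls itself twice with the same
# arguments, which makes the recursion O(b) overall. An iterative O(log b)
# sketch for comparison (assumption: `fast_power` is a new helper, not from
# the source):
def fast_power(a: float, b: int) -> float:
    if b < 0:
        return 1 / fast_power(a, -b)
    result = 1.0
    while b:
        if b & 1:  # multiply in the current bit's contribution
            result *= a
        a *= a
        b >>= 1
    return result


assert fast_power(-2, -3) == power(-2, -3) == -0.125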
| 5 | 0 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class __snake_case ( nn.Module ):
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None):
        """simple docstring"""
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        """simple docstring"""
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return TransformeraDModelOutput(sample=output_states)
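
# The mixing rule used in forward above, shown in isolation (assumption: toy
# tensors stand in for the two transformers' residual outputs):
if __name__ == "__main__":
    import torch

    input_states = torch.randn(1, 4, 8)
    delta_text, delta_image = torch.randn(1, 4, 8), torch.randn(1, 4, 8)
    mix_ratio = 0.5
    output_states = delta_text * mix_ratio + delta_image * (1 - mix_ratio) + input_states
    print(output_states.shape)  # torch.Size([1, 4, 8])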
| 51 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowerCamelCase__ ( nn.Module):
def __init__(self , UpperCAmelCase = 1_6 , UpperCAmelCase = 8_8 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = 3_2 , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = "geglu" , UpperCAmelCase = None , ) -> Any:
super().__init__()
_lowercase =nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=UpperCAmelCase , attention_head_dim=UpperCAmelCase , in_channels=UpperCAmelCase , num_layers=UpperCAmelCase , dropout=UpperCAmelCase , norm_num_groups=UpperCAmelCase , cross_attention_dim=UpperCAmelCase , attention_bias=UpperCAmelCase , sample_size=UpperCAmelCase , num_vector_embeds=UpperCAmelCase , activation_fn=UpperCAmelCase , num_embeds_ada_norm=UpperCAmelCase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_lowercase =0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_lowercase =[7_7, 2_5_7]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_lowercase =[1, 0]
def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase = True , ) -> str:
_lowercase =hidden_states
_lowercase =[]
_lowercase =0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_lowercase =encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_lowercase =self.transformer_index_for_condition[i]
_lowercase =self.transformers[transformer_index](
UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , timestep=UpperCAmelCase , cross_attention_kwargs=UpperCAmelCase , return_dict=UpperCAmelCase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_lowercase =encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_lowercase =output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=UpperCAmelCase )
| 5 | 0 |
def palindromic_string(input_string: str) -> str:
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, then update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
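
# Spot checks for palindromic_string (the second input is a classic test
# string for Manacher's algorithm):
assert palindromic_string("abacaba") == "abacaba"
assert palindromic_string("forgeeksskeegfor") == "geeksskeeg"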
| 73 |
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, weight):
    """simple docstring"""
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], weight)
    graph[b - 1].add_edge(graph[a - 1], weight)


def prim(graph, root) -> list:
    """simple docstring"""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph, root) -> Iterator[tuple]:
    """simple docstring"""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
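
# Usage sketch for the helpers above (assumption: a small triangle graph;
# Prim's algorithm should pick the two cheapest edges):
graph = [Vertex(n) for n in range(3)]  # vertex ids "0", "1", "2"
connect(graph, 1, 2, 1)  # connect() takes 1-based vertex indices
connect(graph, 2, 3, 2)
connect(graph, 1, 3, 5)
print(prim(graph, graph[0]))  # [(2, 1), (3, 2)]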
| 5 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Tuple = logging.get_logger(__name__)
_A : List[Any] = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(self, image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27, num_character_labels=38, num_bpe_labels=50257, num_wordpiece_labels=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0, qkv_bias=True, distilled=False, layer_norm_eps=1e-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, output_a3_attentions=False, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
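
# Quick sanity check for the restored config above (assumption: executed in a
# context where transformers' PretrainedConfig machinery is importable):
if __name__ == "__main__":
    config = MgpstrConfig()
    assert config.model_type == "mgp-str"
    assert config.hidden_size == 768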
| 142 |
# flake8: noqa
# Lint as: python3
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 5 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"
def __init__(self , UpperCAmelCase=3_2 , UpperCAmelCase=7_6_8 , UpperCAmelCase=1_2 , UpperCAmelCase=1_2 , UpperCAmelCase=3_0_7_2 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase="group" , UpperCAmelCase="gelu" , UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase=(1_0, 3, 3, 3, 3, 2, 2) , UpperCAmelCase=False , UpperCAmelCase=1_2_8 , UpperCAmelCase=1_6 , UpperCAmelCase=3_2_0 , UpperCAmelCase=8_0_0 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.05 , UpperCAmelCase=1_0 , UpperCAmelCase=2 , UpperCAmelCase=0.0 , UpperCAmelCase=1_0 , UpperCAmelCase=3_2_0 , UpperCAmelCase=2 , UpperCAmelCase=0.1 , UpperCAmelCase=1_0_0 , UpperCAmelCase=2_5_6 , UpperCAmelCase=2_5_6 , UpperCAmelCase=0.1 , UpperCAmelCase="mean" , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=2_5_6 , UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCAmelCase=(5, 3, 3, 1, 1) , UpperCAmelCase=(1, 2, 3, 1, 1) , UpperCAmelCase=5_1_2 , UpperCAmelCase=8_0 , UpperCAmelCase=0 , UpperCAmelCase=1 , UpperCAmelCase=2 , UpperCAmelCase=False , UpperCAmelCase=3 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=None , **UpperCAmelCase , ) -> Optional[Any]:
super().__init__(**UpperCAmelCase , pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase )
_lowercase =hidden_size
_lowercase =feat_extract_norm
_lowercase =feat_extract_activation
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =conv_bias
_lowercase =num_buckets
_lowercase =max_bucket_distance
_lowercase =num_conv_pos_embeddings
_lowercase =num_conv_pos_embedding_groups
_lowercase =len(self.conv_dim )
_lowercase =num_hidden_layers
_lowercase =intermediate_size
_lowercase =hidden_act
_lowercase =num_attention_heads
_lowercase =hidden_dropout
_lowercase =attention_dropout
_lowercase =activation_dropout
_lowercase =feat_proj_dropout
_lowercase =final_dropout
_lowercase =layerdrop
_lowercase =layer_norm_eps
_lowercase =initializer_range
_lowercase =num_ctc_classes
_lowercase =vocab_size
_lowercase =do_stable_layer_norm
_lowercase =use_weighted_layer_sum
_lowercase =classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowercase =apply_spec_augment
_lowercase =mask_time_prob
_lowercase =mask_time_length
_lowercase =mask_time_min_masks
_lowercase =mask_feature_prob
_lowercase =mask_feature_length
# parameters for pretraining with codevector quantized representations
_lowercase =num_codevectors_per_group
_lowercase =num_codevector_groups
_lowercase =contrastive_logits_temperature
_lowercase =num_negatives
_lowercase =codevector_dim
_lowercase =proj_codevector_dim
_lowercase =diversity_loss_weight
# ctc loss
_lowercase =ctc_loss_reduction
_lowercase =ctc_zero_infinity
# adapter
_lowercase =add_adapter
_lowercase =adapter_kernel_size
_lowercase =adapter_stride
_lowercase =num_adapter_layers
_lowercase =output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowercase =classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =xvector_output_dim
@property
def __A (self ) -> int:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 5 | 0 |
'''simple docstring'''
from math import factorial
__A : Dict = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_00_00_00) -> int:
    '''simple docstring'''
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0"
        )

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
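
# Spot checks from the problem statement: 145 = 1! + 4! + 5!, and 169 maps to
# 363601 (1! + 6! + 9!) in its chain.
assert digit_factorial_sum(145) == 145
assert digit_factorial_sum(169) == 363601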
| 120 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase):
    def tearDown(self) -> None:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
    def test_stable_diffusion_inpaint_pipeline(self) -> None:
_lowercase =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
_lowercase =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
_lowercase ='''xvjiarui/stable-diffusion-2-inpainting'''
_lowercase , _lowercase =FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCAmelCase , safety_checker=UpperCAmelCase )
_lowercase ='''Face of a yellow cat, high resolution, sitting on a park bench'''
_lowercase =jax.random.PRNGKey(0 )
_lowercase =5_0
_lowercase =jax.device_count()
_lowercase =num_samples * [prompt]
_lowercase =num_samples * [init_image]
_lowercase =num_samples * [mask_image]
_lowercase , _lowercase , _lowercase =pipeline.prepare_inputs(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# shard inputs and rng
_lowercase =replicate(UpperCAmelCase )
_lowercase =jax.random.split(UpperCAmelCase , jax.device_count() )
_lowercase =shard(UpperCAmelCase )
_lowercase =shard(UpperCAmelCase )
_lowercase =shard(UpperCAmelCase )
_lowercase =pipeline(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase )
_lowercase =output.images.reshape(UpperCAmelCase , 5_1_2 , 5_1_2 , 3 )
_lowercase =images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
_lowercase =jnp.asarray(jax.device_get(image_slice.flatten() ) )
_lowercase =jnp.array(
[0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 5 | 0 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )
    def test_padding_different_model_input_name(self):
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
'''simple docstring'''
pass
| 111 |
import comet # From: unbabel-comet
import torch
import datasets
UpperCAmelCase__ = datasets.logging.get_logger(__name__)
UpperCAmelCase__ = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
UpperCAmelCase__ = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework, the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
UpperCAmelCase__ = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCamelCase__ ( datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence''' ),
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
        '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf''',
] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 5 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
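
# Why the module object is replaced in sys.modules above: attribute access on
# the lazy module triggers the real import on first use. A simplified stand-in
# (assumption: a toy sketch, not the actual transformers._LazyModule, which
# also resolves names through the _import_structure mapping):
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __getattr__(self, name):
        submodule = importlib.import_module("." + name, self.__name__)
        setattr(self, name, submodule)  # cache so __getattr__ only runs once
        return submodule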
| 95 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
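
# Smoke-test sketch for the scheduler above (assumption: random tensors stand
# in for a trained score model's output, so the samples are meaningless):
if __name__ == "__main__":
    scheduler = ScoreSdeVpScheduler()
    scheduler.set_timesteps(10)
    x = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        score = torch.randn_like(x)
        x, x_mean = scheduler.step_pred(score, x, t)
    print(x_mean.shape)  # torch.Size([1, 3, 8, 8])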
| 5 | 0 |
import os
def solution() -> int:
    """
    Finds the maximum total from top to bottom of the triangle in triangle.txt
    by accumulating, row by row, the best path sum that can reach each cell.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, 'triangle.txt')

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(' ' ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )

    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number1 = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1 , number2 )
    return max(a[-1] )
if __name__ == "__main__":
print(solution())
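    # Toy check of the same bottom-up recurrence on an in-memory triangle
    # (solution() itself reads Project Euler's triangle.txt from disk):
    a = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    print(max(a[-1]))  # 23 == 3 + 7 + 4 + 9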
| 322 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    """"""
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith('''deeplabv3_''' ):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = '''pascal-voc-id2label.json'''
    else:
        config.num_labels = 1000
        filename = '''imagenet-1k-id2label.json'''

    repo_id = '''huggingface/label-files'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1 , 6 ):
        if F"layer_{i}." in name:
            name = name.replace(F"layer_{i}." , F"encoder.layer.{i - 1}." )

    if "conv_1." in name:
        name = name.replace('''conv_1.''' , '''conv_stem.''' )
    if ".block." in name:
        name = name.replace('''.block.''' , '''.''' )
    if "exp_1x1" in name:
        name = name.replace('''exp_1x1''' , '''expand_1x1''' )
    if "red_1x1" in name:
        name = name.replace('''red_1x1''' , '''reduce_1x1''' )
    if ".local_rep.conv_3x3." in name:
        name = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' )
    if ".local_rep.conv_1x1." in name:
        name = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' )
    if ".norm." in name:
        name = name.replace('''.norm.''' , '''.normalization.''' )
    if ".conv." in name:
        name = name.replace('''.conv.''' , '''.convolution.''' )
    if ".conv_proj." in name:
        name = name.replace('''.conv_proj.''' , '''.conv_projection.''' )

    for i in range(0 , 2 ):
        for j in range(0 , 4 ):
            if F".{i}.{j}." in name:
                name = name.replace(F".{i}.{j}." , F".{i}.layer.{j}." )

    for i in range(2 , 6 ):
        for j in range(0 , 4 ):
            if F".{i}.{j}." in name:
                name = name.replace(F".{i}.{j}." , F".{i}." )
                if "expand_1x1" in name:
                    name = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' )
                if "conv_3x3" in name:
                    name = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' )
                if "reduce_1x1" in name:
                    name = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' )

    for i in range(2 , 5 ):
        if F".global_rep.{i}.weight" in name:
            name = name.replace(F".global_rep.{i}.weight" , '''.layernorm.weight''' )
        if F".global_rep.{i}.bias" in name:
            name = name.replace(F".global_rep.{i}.bias" , '''.layernorm.bias''' )
    if ".global_rep." in name:
        name = name.replace('''.global_rep.''' , '''.transformer.''' )
    if ".pre_norm_mha.0." in name:
        name = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' )
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' )
    if ".pre_norm_ffn.0." in name:
        name = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' )
    if ".pre_norm_ffn.1." in name:
        name = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' )
    if ".pre_norm_ffn.4." in name:
        name = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' )
    if ".transformer." in name:
        name = name.replace('''.transformer.''' , '''.transformer.layer.''' )

    if ".aspp_layer." in name:
        name = name.replace('''.aspp_layer.''' , '''.''' )
    if ".aspp_pool." in name:
        name = name.replace('''.aspp_pool.''' , '''.''' )
    if "seg_head." in name:
        name = name.replace('''seg_head.''' , '''segmentation_head.''' )
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' )

    if "classifier.fc." in name:
        name = name.replace('''classifier.fc.''' , '''classifier.''' )
    elif (not base_model) and ("segmentation_head." not in name):
        name = '''mobilevit.''' + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    """"""
    if base_model:
        model_prefix = ''''''
    else:
        model_prefix = '''mobilevit.'''

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[0][6:] ) - 1
            transformer_num = int(key_split[3] )
            layer = model.get_submodule(F"{model_prefix}encoder.layer.{layer_num}" )
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                F"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + '''query.weight'''] = val[:dim, :]
                orig_state_dict[prefix + '''key.weight'''] = val[dim : dim * 2, :]
                orig_state_dict[prefix + '''value.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[prefix + '''query.bias'''] = val[:dim]
                orig_state_dict[prefix + '''key.bias'''] = val[dim : dim * 2]
                orig_state_dict[prefix + '''value.bias'''] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    """"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case=False ) -> int:
"""simple docstring"""
_lowercase =get_mobilevit_config(__snake_case )
# load original state_dict
_lowercase =torch.load(__snake_case , map_location='''cpu''' )
# load 🤗 model
if mobilevit_name.startswith('''deeplabv3_''' ):
_lowercase =MobileViTForSemanticSegmentation(__snake_case ).eval()
else:
_lowercase =MobileViTForImageClassification(__snake_case ).eval()
_lowercase =convert_state_dict(__snake_case , __snake_case )
model.load_state_dict(__snake_case )
# Check outputs on an image, prepared by MobileViTImageProcessor
_lowercase =MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
_lowercase =image_processor(images=prepare_img() , return_tensors='''pt''' )
_lowercase =model(**__snake_case )
_lowercase =outputs.logits
if mobilevit_name.startswith('''deeplabv3_''' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
_lowercase =torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
_lowercase =torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
_lowercase =torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
] )
else:
raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
assert torch.allclose(logits[0, :3, :3, :3] , __snake_case , atol=1e-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
_lowercase =torch.tensor([-0.98_66, 0.23_92, -1.12_41] )
elif mobilevit_name == "mobilevit_xs":
_lowercase =torch.tensor([-2.47_61, -0.93_99, -1.95_87] )
elif mobilevit_name == "mobilevit_xxs":
_lowercase =torch.tensor([-1.93_64, -1.23_27, -0.46_53] )
else:
raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
assert torch.allclose(logits[0, :3] , __snake_case , atol=1e-4 )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(F"Saving model {mobilevit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__snake_case )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__snake_case )
if push_to_hub:
_lowercase ={
'''mobilevit_s''': '''mobilevit-small''',
'''mobilevit_xs''': '''mobilevit-x-small''',
'''mobilevit_xxs''': '''mobilevit-xx-small''',
'''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
'''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
'''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
}
print('''Pushing to the hub...''' )
_lowercase =model_mapping[mobilevit_name]
image_processor.push_to_hub(__snake_case , organization='''apple''' )
model.push_to_hub(__snake_case , organization='''apple''' )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
UpperCAmelCase__ = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
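    # Hypothetical alternative to the CLI above: call the converter directly.
    # Both paths below are placeholders, not shipped files, so the call is
    # left commented out.
    # convert_movilevit_checkpoint("mobilevit_s", "./mobilevit_s.pt", "./mobilevit-small")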
| 5 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__snake_case : int = logging.get_logger(__name__)
__snake_case : int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
__snake_case : Optional[int] = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    '''simple docstring'''
    for attribute in key.split("""."""):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = """lm_head"""

        hf_pointer = getattr(hf_pointer , attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = """unispeech.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("""w2v_model.""")[-1] == name.split(""".""")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(""".""")[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index)
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = """weight"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type , is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'''Unused weights: {unused_weights}''')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split("""conv_layers.""")[-1]
    items = name.split(""".""")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    '''
    Copy/paste/tweak model's weights to transformers design.
    '''
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.eos_token_id = target_dict.bos_index
            config.pad_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path , """vocab.json""")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 4_2
            vocab_dict["<s>"] = 4_3
            with open(vocab_path , """w""" , encoding="""utf-8""") as vocab_handle:
                json.dump(vocab_dict , vocab_handle)
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == """layer""" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""")[:-1]), """w2v_path""": checkpoint_path})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model , hf_unispeech , is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__snake_case : str = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__snake_case : Tuple = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
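    # Hypothetical direct call, equivalent to the CLI above; all paths are
    # placeholders, so the call is left commented out.
    # convert_unispeech_checkpoint("./unispeech.pt", "./unispeech-hf", dict_path="./dict.ltr.txt")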
| 248 |
import requests
from bs4 import BeautifulSoup
def world_covidaa_stats(url = "https://www.worldometers.info/coronavirus" ) -> dict:
    """
    Return a dict of live COVID-19 statistics scraped from worldometers.info.
    """
    soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
    keys = soup.findAll('''h1''' )
    values = soup.findAll('''div''' , {'''class''': '''maincounter-number'''} )
    keys += soup.findAll('''span''' , {'''class''': '''panel-title'''} )
    values += soup.findAll('''div''' , {'''class''': '''number-table-main'''} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 5 | 0 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
A : str = logging.get_logger(__name__)
A : Any = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
A : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """simple docstring"""
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )

    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            F" {value.shape} for {full_name}" )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights(fairseq_model, hf_model):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )

    logger.warning(F"Unused weights: {unused_weights}" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path )
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config )
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    model = model[0].eval()

    recursively_load_weights(model , hf_wavavec )

    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
A : int = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
A : Tuple = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
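    # Hypothetical direct call, equivalent to the CLI above; all paths are
    # placeholders, so the call is left commented out.
    # convert_unispeech_sat_checkpoint("./unispeech_sat.pt", "./unispeech-sat-hf")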
| 184 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
UpperCAmelCase__ = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 | 0 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit = 1_000_000 , n_limit = 10 ):
    """
    Counts square laminae with up to t_limit tiles (outer width n, centred
    square hole of width m with the same parity, t = n*n - m*m) and returns
    how many tile totals admit between 1 and n_limit distinct arrangements.
    """
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1

        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(F"{solution() = }")
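    # Hypothetical smaller limits exercise the same counting logic and are
    # quick to check by hand, e.g. 8 = 3*3 - 1*1 and 12 = 4*4 - 2*2 are each
    # a single-lamina tile total within 100 tiles.
    print(solution(t_limit=100, n_limit=1))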
| 313 |
def palindromic_string(input_string ) -> str:
    """
    Manacher's algorithm: finds the longest palindromic substring in linear time.

    >>> palindromic_string('abbbaba')
    'abbba'
    >>> palindromic_string('ababa')
    'ababa'
    """
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string ) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string ) )]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string ) ):
        k = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
        while (
            j - k >= 0
            and j + k < len(new_input_string )
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
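    # The "|" separators let even- and odd-length palindromes share one
    # centre-expansion pass, which is what keeps the algorithm linear.
    print(palindromic_string("forgeeksskeegfor"))  # geeksskeeg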
| 5 | 0 |