code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
import warnings
from functools import wraps
from typing import Callable
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Callable:
@wraps(_a )
def _inner_fn(*__lowerCAmelCase , **__lowerCAmelCase ):
warnings.warn(
(F"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , _a , )
return fn(*_a , **_a )
return _inner_fn
| 33 |
import argparse
import os
import re
import packaging.version
_lowerCAmelCase = """examples/"""
_lowerCAmelCase = {
"""examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""),
"""doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
_lowerCAmelCase = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
_lowerCAmelCase = """README.md"""
def lowercase ( _a ,_a ,_a ) -> List[Any]:
with open(_a ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
UpperCAmelCase_: List[str] = f.read()
UpperCAmelCase_ , UpperCAmelCase_: Union[str, Any] = REPLACE_PATTERNS[pattern]
UpperCAmelCase_: List[Any] = replace.replace("VERSION" ,_a )
UpperCAmelCase_: str = re_pattern.sub(_a ,_a )
with open(_a ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
f.write(_a )
def lowercase ( _a ) -> List[str]:
for folder, directories, fnames in os.walk(_a ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(_a ,_a ) ,_a ,pattern="examples" )
def lowercase ( _a ,_a=False ) -> Optional[Any]:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_a ,_a ,_a )
if not patch:
update_version_in_examples(_a )
def lowercase ( ) -> List[str]:
UpperCAmelCase_: int = "🤗 Transformers currently provides the following architectures"
UpperCAmelCase_: Dict = "1. Want to contribute a new model?"
with open(_a ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
UpperCAmelCase_: Tuple = f.readlines()
# Find the start of the list.
UpperCAmelCase_: Optional[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
UpperCAmelCase_: Tuple = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
UpperCAmelCase_: str = lines[index].replace(
"https://huggingface.co/docs/transformers/main/model_doc" ,"https://huggingface.co/docs/transformers/model_doc" ,)
index += 1
with open(_a ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
f.writelines(_a )
def lowercase ( ) -> int:
with open(REPLACE_FILES["init"] ,"r" ) as f:
UpperCAmelCase_: List[str] = f.read()
UpperCAmelCase_: List[Any] = REPLACE_PATTERNS["init"][0].search(_a ).groups()[0]
return packaging.version.parse(_a )
def lowercase ( _a=False ) -> Optional[Any]:
UpperCAmelCase_: Optional[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
UpperCAmelCase_: int = default_version.base_version
elif patch:
UpperCAmelCase_: Tuple = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
UpperCAmelCase_: int = f"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
UpperCAmelCase_: Dict = input(f"Which version are you releasing? [{default_version}]" )
if len(_a ) == 0:
UpperCAmelCase_: Union[str, Any] = default_version
print(f"Updating version to {version}." )
global_version_update(_a ,patch=_a )
if not patch:
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
def lowercase ( ) -> Union[str, Any]:
UpperCAmelCase_: Any = get_version()
UpperCAmelCase_: List[Any] = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
UpperCAmelCase_: int = current_version.base_version
# Check with the user we got that right.
UpperCAmelCase_: Any = input(f"Which version are we developing now? [{dev_version}]" )
if len(_a ) == 0:
UpperCAmelCase_: str = dev_version
print(f"Updating version to {version}." )
global_version_update(_a )
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
_lowerCAmelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work() | 137 | 0 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def _A ( _a : Union[str, Any] , _a : Optional[int] , _a : Dict , _a : List[str] , _a : List[Any]=True , _a : Optional[Any]="pt" ):
"""simple docstring"""
A = {"""add_prefix_space""": True} if isinstance(_a , _a ) and not line.startswith(""" """ ) else {}
A = padding_side
return tokenizer(
[line] , max_length=_a , padding="""max_length""" if pad_to_max_length else None , truncation=_a , return_tensors=_a , add_special_tokens=_a , **_a , )
def _A ( _a : Union[str, Any] , _a : Any , _a : Dict=None , ):
"""simple docstring"""
A = input_ids.ne(_a ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_="train" ,lowerCamelCase_=None ,lowerCamelCase_=None ,lowerCamelCase_=None ,lowerCamelCase_="" ,) -> Optional[Any]:
super().__init__()
A = Path(lowerCamelCase_ ).joinpath(type_path + """.source""" )
A = Path(lowerCamelCase_ ).joinpath(type_path + """.target""" )
A = self.get_char_lens(self.src_file )
A = max_source_length
A = max_target_length
assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}'
A = tokenizer
A = prefix
if n_obs is not None:
A = self.src_lens[:n_obs]
A = src_lang
A = tgt_lang
def __len__( self ) -> str:
return len(self.src_lens )
def __getitem__( self ,lowerCamelCase_ ) -> Dict[str, torch.Tensor]:
A = index + 1 # linecache starts at 1
A = self.prefix + linecache.getline(str(self.src_file ) ,lowerCamelCase_ ).rstrip("""\n""" )
A = linecache.getline(str(self.tgt_file ) ,lowerCamelCase_ ).rstrip("""\n""" )
assert source_line, f'empty source line for index {index}'
assert tgt_line, f'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer ,lowerCamelCase_ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
A = (
self.tokenizer.question_encoder if isinstance(self.tokenizer ,lowerCamelCase_ ) else self.tokenizer
)
A = self.tokenizer.generator if isinstance(self.tokenizer ,lowerCamelCase_ ) else self.tokenizer
A = encode_line(lowerCamelCase_ ,lowerCamelCase_ ,self.max_source_length ,"""right""" )
A = encode_line(lowerCamelCase_ ,lowerCamelCase_ ,self.max_target_length ,"""right""" )
A = source_inputs["""input_ids"""].squeeze()
A = target_inputs["""input_ids"""].squeeze()
A = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def UpperCamelCase__ ( lowerCamelCase_ ) -> Union[str, Any]:
return [len(lowerCamelCase_ ) for x in Path(lowerCamelCase_ ).open().readlines()]
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> Dict[str, torch.Tensor]:
A = torch.stack([x["""input_ids"""] for x in batch] )
A = torch.stack([x["""attention_mask"""] for x in batch] )
A = torch.stack([x["""decoder_input_ids"""] for x in batch] )
A = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer ,lowerCamelCase_ )
else self.tokenizer.pad_token_id
)
A = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer ,lowerCamelCase_ )
else self.tokenizer.pad_token_id
)
A = trim_batch(lowerCamelCase_ ,lowerCamelCase_ )
A , A = trim_batch(lowerCamelCase_ ,lowerCamelCase_ ,attention_mask=lowerCamelCase_ )
A = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
UpperCAmelCase =getLogger(__name__)
def _A ( _a : List[List] ):
"""simple docstring"""
return list(itertools.chain.from_iterable(_a ) )
def _A ( _a : str ):
"""simple docstring"""
A = get_git_info()
save_json(_a , os.path.join(_a , """git_log.json""" ) )
def _A ( _a : Optional[Any] , _a : str , _a : Optional[int]=4 , **_a : str ):
"""simple docstring"""
with open(_a , """w""" ) as f:
json.dump(_a , _a , indent=_a , **_a )
def _A ( _a : Optional[Any] ):
"""simple docstring"""
with open(_a ) as f:
return json.load(_a )
def _A ( ):
"""simple docstring"""
A = git.Repo(search_parent_directories=_a )
A = {
"""repo_id""": str(_a ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
"""hostname""": str(socket.gethostname() ),
}
return repo_infos
def _A ( _a : Callable , _a : Iterable ):
"""simple docstring"""
return list(map(_a , _a ) )
def _A ( _a : Optional[Any] , _a : List[Any] ):
"""simple docstring"""
with open(_a , """wb""" ) as f:
return pickle.dump(_a , _a )
def _A ( _a : Union[str, Any] ):
"""simple docstring"""
def remove_articles(_a : Optional[Any] ):
return re.sub(r"""\b(a|an|the)\b""" , """ """ , _a )
def white_space_fix(_a : int ):
return " ".join(text.split() )
def remove_punc(_a : int ):
A = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_a : List[str] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_a ) ) ) )
def _A ( _a : Optional[Any] , _a : List[str] ):
"""simple docstring"""
A = normalize_answer(_a ).split()
A = normalize_answer(_a ).split()
A = Counter(_a ) & Counter(_a )
A = sum(common.values() )
if num_same == 0:
return 0
A = 1.0 * num_same / len(_a )
A = 1.0 * num_same / len(_a )
A = (2 * precision * recall) / (precision + recall)
return fa
def _A ( _a : int , _a : int ):
"""simple docstring"""
return normalize_answer(_a ) == normalize_answer(_a )
def _A ( _a : List[str] , _a : List[str] ):
"""simple docstring"""
assert len(_a ) == len(_a )
A = 0
for hypo, pred in zip(_a , _a ):
em += exact_match_score(_a , _a )
if len(_a ) > 0:
em /= len(_a )
return {"em": em}
def _A ( _a : Optional[Any] ):
"""simple docstring"""
return model_prefix.startswith("""rag""" )
def _A ( _a : Optional[Any] , _a : Dict , _a : Union[str, Any] ):
"""simple docstring"""
A = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
A = """dropout_rate"""
for p in extra_params:
if getattr(_a , _a , _a ):
if not hasattr(_a , _a ) and not hasattr(_a , equivalent_param[p] ):
logger.info("""config doesn't have a `{}` attribute""".format(_a ) )
delattr(_a , _a )
continue
A = p if hasattr(_a , _a ) else equivalent_param[p]
setattr(_a , _a , getattr(_a , _a ) )
delattr(_a , _a )
return hparams, config
| 255 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowerCamelCase = 42
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@register_to_config
def __init__( self ,lowerCamelCase_ = 3_2 ,lowerCamelCase_ = 6_4 ,lowerCamelCase_ = 2_0 ,lowerCamelCase_ = 7_6_8 ,lowerCamelCase_=7_7 ,lowerCamelCase_=4 ,lowerCamelCase_ = 0.0 ,lowerCamelCase_ = "silu" ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = "linear" ,lowerCamelCase_ = "prd" ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,) -> Optional[int]:
super().__init__()
A = num_attention_heads
A = attention_head_dim
A = num_attention_heads * attention_head_dim
A = additional_embeddings
A = time_embed_dim or inner_dim
A = embedding_proj_dim or embedding_dim
A = clip_embed_dim or embedding_dim
A = Timesteps(lowerCamelCase_ ,lowerCamelCase_ ,0 )
A = TimestepEmbedding(lowerCamelCase_ ,lowerCamelCase_ ,out_dim=lowerCamelCase_ ,act_fn=lowerCamelCase_ )
A = nn.Linear(lowerCamelCase_ ,lowerCamelCase_ )
if embedding_proj_norm_type is None:
A = None
elif embedding_proj_norm_type == "layer":
A = nn.LayerNorm(lowerCamelCase_ )
else:
raise ValueError(f'unsupported embedding_proj_norm_type: {embedding_proj_norm_type}' )
A = nn.Linear(lowerCamelCase_ ,lowerCamelCase_ )
if encoder_hid_proj_type is None:
A = None
elif encoder_hid_proj_type == "linear":
A = nn.Linear(lowerCamelCase_ ,lowerCamelCase_ )
else:
raise ValueError(f'unsupported encoder_hid_proj_type: {encoder_hid_proj_type}' )
A = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,lowerCamelCase_ ) )
if added_emb_type == "prd":
A = nn.Parameter(torch.zeros(1 ,1 ,lowerCamelCase_ ) )
elif added_emb_type is None:
A = None
else:
raise ValueError(
f'`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.' )
A = nn.ModuleList(
[
BasicTransformerBlock(
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,dropout=lowerCamelCase_ ,activation_fn="""gelu""" ,attention_bias=lowerCamelCase_ ,)
for d in range(lowerCamelCase_ )
] )
if norm_in_type == "layer":
A = nn.LayerNorm(lowerCamelCase_ )
elif norm_in_type is None:
A = None
else:
raise ValueError(f'Unsupported norm_in_type: {norm_in_type}.' )
A = nn.LayerNorm(lowerCamelCase_ )
A = nn.Linear(lowerCamelCase_ ,lowerCamelCase_ )
A = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-1_00_00.0 )
causal_attention_mask.triu_(1 )
A = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""" ,lowerCamelCase_ ,persistent=lowerCamelCase_ )
A = nn.Parameter(torch.zeros(1 ,lowerCamelCase_ ) )
A = nn.Parameter(torch.zeros(1 ,lowerCamelCase_ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCamelCase__ ( self ) -> Dict[str, AttentionProcessor]:
A = {}
def fn_recursive_add_processors(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ):
if hasattr(lowerCamelCase_ ,"""set_processor""" ):
A = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'{name}.{sub_name}' ,lowerCamelCase_ ,lowerCamelCase_ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
return processors
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> Tuple:
A = len(self.attn_processors.keys() )
if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) and len(lowerCamelCase_ ) != count:
raise ValueError(
f'A dict of processors was passed, but the number of processors {len(lowerCamelCase_ )} does not match the'
f' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )
def fn_recursive_attn_processor(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ):
if hasattr(lowerCamelCase_ ,"""set_processor""" ):
if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
module.set_processor(lowerCamelCase_ )
else:
module.set_processor(processor.pop(f'{name}.processor' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'{name}.{sub_name}' ,lowerCamelCase_ ,lowerCamelCase_ )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
self.set_attn_processor(AttnProcessor() )
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = True ,) -> Tuple:
A = hidden_states.shape[0]
A = timestep
if not torch.is_tensor(lowerCamelCase_ ):
A = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device )
elif torch.is_tensor(lowerCamelCase_ ) and len(timesteps.shape ) == 0:
A = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A = timesteps * torch.ones(lowerCamelCase_ ,dtype=timesteps.dtype ,device=timesteps.device )
A = self.time_proj(lowerCamelCase_ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
A = timesteps_projected.to(dtype=self.dtype )
A = self.time_embedding(lowerCamelCase_ )
if self.embedding_proj_norm is not None:
A = self.embedding_proj_norm(lowerCamelCase_ )
A = self.embedding_proj(lowerCamelCase_ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
A = self.encoder_hidden_states_proj(lowerCamelCase_ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
A = self.proj_in(lowerCamelCase_ )
A = self.positional_embedding.to(hidden_states.dtype )
A = []
A = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCamelCase_ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
A = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
A = hidden_states[:, None, :]
A = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
A = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCamelCase_ ,-1 ,-1 )
additional_embeds.append(lowerCamelCase_ )
A = torch.cat(
lowerCamelCase_ ,dim=1 ,)
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
A = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
A = F.pad(
lowerCamelCase_ ,(
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) ,value=0.0 ,)
A = hidden_states + positional_embeddings
if attention_mask is not None:
A = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
A = F.pad(lowerCamelCase_ ,(0, self.additional_embeddings) ,value=0.0 )
A = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
A = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0 )
if self.norm_in is not None:
A = self.norm_in(lowerCamelCase_ )
for block in self.transformer_blocks:
A = block(lowerCamelCase_ ,attention_mask=lowerCamelCase_ )
A = self.norm_out(lowerCamelCase_ )
if self.prd_embedding is not None:
A = hidden_states[:, -1]
else:
A = hidden_states[:, additional_embeddings_len:]
A = self.proj_to_clip_embeddings(lowerCamelCase_ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCamelCase_ )
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> Optional[int]:
A = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 255 | 1 |
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
_lowercase = logging.get_logger(__name__)
def lowerCamelCase__ ( a , a , a , a=False ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
if not is_sharded:
__snake_case = os.path.abspath(__lowerCAmelCase )
logger.info(f'Loading PyTorch weights from {pt_path}' )
__snake_case = torch.load(__lowerCAmelCase , map_location='cpu' )
logger.info(f'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
__snake_case = convert_pytorch_state_dict_to_flax(__lowerCAmelCase , __lowerCAmelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
__snake_case = convert_pytorch_sharded_state_dict_to_flax(__lowerCAmelCase , __lowerCAmelCase )
return flax_state_dict
def lowerCamelCase__ ( a , a , a , a , ):
def is_key_or_prefix_key_in_dict(a ) -> bool:
return len(set(__lowerCAmelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
__snake_case = pt_tuple_key[:-1] + ('scale',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
__snake_case = pt_tuple_key[:-1] + ('mean',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
__snake_case = pt_tuple_key[:-1] + ('var',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
__snake_case = pt_tuple_key[:-1] + ('embedding',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
__snake_case = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__lowerCAmelCase ):
__snake_case = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
__snake_case = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__lowerCAmelCase ):
__snake_case = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
__snake_case = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
__snake_case = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
__snake_case = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
__snake_case = pt_tuple_key[-2] + '_g'
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
__snake_case = pt_tuple_key[-2] + '_v'
if name is not None:
__snake_case = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def lowerCamelCase__ ( a , a ):
# convert pytorch tensor to numpy
__snake_case = {k: v.numpy() for k, v in pt_state_dict.items()}
__snake_case = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
__snake_case = flax_model.params['params']
else:
__snake_case = flax_model.params
__snake_case = flatten_dict(__lowerCAmelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__snake_case = flatten_dict(flax_model.params['batch_stats'] )
random_flax_state_dict.update(__lowerCAmelCase )
__snake_case = {}
__snake_case = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
__snake_case = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__snake_case = tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
__snake_case = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__snake_case = pt_tuple_key[1:]
# Correctly rename weight parameters
__snake_case , __snake_case = rename_key_and_reshape_tensor(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# add model prefix if necessary
__snake_case = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__snake_case = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
__snake_case = jnp.asarray(__lowerCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
continue
# also add unexpected weight so that warning is thrown
__snake_case = jnp.asarray(__lowerCAmelCase )
else:
# also add unexpected weight so that warning is thrown
__snake_case = jnp.asarray(__lowerCAmelCase )
return unflatten_dict(__lowerCAmelCase )
def lowerCamelCase__ ( a , a ):
import torch
# Load the index
__snake_case = {}
for shard_file in shard_filenames:
# load using msgpack utils
__snake_case = torch.load(__lowerCAmelCase )
__snake_case = {k: v.numpy() for k, v in pt_state_dict.items()}
__snake_case = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__snake_case = flax_model.params['params']
__snake_case = flatten_dict(__lowerCAmelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats'] ) )
else:
__snake_case = flax_model.params
__snake_case = flatten_dict(__lowerCAmelCase )
__snake_case = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
__snake_case = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__snake_case = tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
__snake_case = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__snake_case = pt_tuple_key[1:]
# Correctly rename weight parameters
__snake_case , __snake_case = rename_key_and_reshape_tensor(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# add model prefix if necessary
__snake_case = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__snake_case = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
__snake_case = jnp.asarray(__lowerCAmelCase )
continue
if "var" in flax_key[-1]:
__snake_case = jnp.asarray(__lowerCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
continue
# also add unexpected weight so that warning is thrown
__snake_case = jnp.asarray(__lowerCAmelCase )
else:
# also add unexpected weight so that warning is thrown
__snake_case = jnp.asarray(__lowerCAmelCase )
return unflatten_dict(__lowerCAmelCase )
def lowerCamelCase__ ( a , a ):
__snake_case = os.path.abspath(__lowerCAmelCase )
logger.info(f'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
__snake_case = getattr(__lowerCAmelCase , 'Flax' + model.__class__.__name__ )
# load flax weight dict
with open(__lowerCAmelCase , 'rb' ) as state_f:
try:
__snake_case = from_bytes(__lowerCAmelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(f'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(__lowerCAmelCase , __lowerCAmelCase )
def lowerCamelCase__ ( a , a ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
__snake_case = flatten_dict(jax.tree_util.tree_map(lambda a : x.dtype == jnp.bfloataa , __lowerCAmelCase ) ).values()
if any(__lowerCAmelCase ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
__snake_case = jax.tree_util.tree_map(
lambda a : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __lowerCAmelCase )
__snake_case = flatten_dict(__lowerCAmelCase )
__snake_case = pt_model.state_dict()
__snake_case = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
__snake_case = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
__snake_case = []
__snake_case = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__snake_case = flax_key_tuple[0] == pt_model.base_model_prefix
__snake_case = '.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
__snake_case = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
__snake_case = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__lowerCAmelCase ) not in pt_model_dict:
# conv layer
__snake_case = flax_key_tuple[:-1] + ('weight',)
__snake_case = jnp.transpose(__lowerCAmelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__lowerCAmelCase ) not in pt_model_dict:
# linear layer
__snake_case = flax_key_tuple[:-1] + ('weight',)
__snake_case = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__snake_case = flax_key_tuple[:-1] + ('weight',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
__snake_case = flax_key_tuple[:-1] + ('running_mean',)
elif "var" in flax_key_tuple[-1]:
__snake_case = flax_key_tuple[:-1] + ('running_var',)
if "batch_stats" in flax_state:
__snake_case = '.'.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
__snake_case = '.'.join(__lowerCAmelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
__snake_case = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
__snake_case = key.split('.' )
__snake_case = None
if key_components[-3::2] == ["parametrizations", "original0"]:
__snake_case = key_components[-2] + '_g'
elif key_components[-3::2] == ["parametrizations", "original1"]:
__snake_case = key_components[-2] + '_v'
if name is not None:
__snake_case = key_components[:-3] + [name]
__snake_case = '.'.join(__lowerCAmelCase )
__snake_case = key
if flax_key in special_pt_names:
__snake_case = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
__snake_case = np.asarray(__lowerCAmelCase ) if not isinstance(__lowerCAmelCase , np.ndarray ) else flax_tensor
__snake_case = torch.from_numpy(__lowerCAmelCase )
# remove from missing keys
missing_keys.remove(__lowerCAmelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__lowerCAmelCase )
pt_model.load_state_dict(__lowerCAmelCase )
# re-transform missing_keys to list
__snake_case = list(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
else:
logger.warning(f'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(__lowerCAmelCase ) > 0:
logger.warning(
f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
' use it for predictions and inference.' )
else:
logger.warning(
f'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
'If your task is similar to the task the model of the checkpoint was trained on, '
f'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
| 356 |
'''simple docstring'''
import operator
def A__ ( __lowerCAmelCase : list , __lowerCAmelCase : bool = False , __lowerCAmelCase : list | None = None ):
lowerCamelCase__ = operator.lt if reverse else operator.gt
lowerCamelCase__ = solution or []
if not arr:
return solution
lowerCamelCase__ = [arr.pop(0 )]
for i, item in enumerate(__lowerCAmelCase ):
if _operator(__lowerCAmelCase , sublist[-1] ):
sublist.append(__lowerCAmelCase )
arr.pop(__lowerCAmelCase )
# merging sublist into solution list
if not solution:
solution.extend(__lowerCAmelCase )
else:
while sublist:
lowerCamelCase__ = sublist.pop(0 )
for i, xx in enumerate(__lowerCAmelCase ):
if not _operator(__lowerCAmelCase , __lowerCAmelCase ):
solution.insert(__lowerCAmelCase , __lowerCAmelCase )
break
else:
solution.append(__lowerCAmelCase )
strand_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 50 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , A_ , A_=7 , A_=3 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , A_=True , A_=1 / 255 , A_=True , ) -> Dict:
"""simple docstring"""
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
UpperCamelCase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333}
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = num_channels
UpperCamelCase = min_resolution
UpperCamelCase = max_resolution
UpperCamelCase = do_resize
UpperCamelCase = size
UpperCamelCase = do_normalize
UpperCamelCase = image_mean
UpperCamelCase = image_std
UpperCamelCase = do_rescale
UpperCamelCase = rescale_factor
UpperCamelCase = do_pad
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __UpperCamelCase ( self , A_ , A_=False ) -> Optional[Any]:
"""simple docstring"""
if not batched:
UpperCamelCase = image_inputs[0]
if isinstance(A_ , Image.Image ):
UpperCamelCase , UpperCamelCase = image.size
else:
UpperCamelCase , UpperCamelCase = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase = int(self.size['shortest_edge'] * h / w )
UpperCamelCase = self.size['shortest_edge']
elif w > h:
UpperCamelCase = self.size['shortest_edge']
UpperCamelCase = int(self.size['shortest_edge'] * w / h )
else:
UpperCamelCase = self.size['shortest_edge']
UpperCamelCase = self.size['shortest_edge']
else:
UpperCamelCase = []
for image in image_inputs:
UpperCamelCase , UpperCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase = max(A_ , key=lambda A_ : item[0] )[0]
UpperCamelCase = max(A_ , key=lambda A_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : List[str] = DetaImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = DetaImageProcessingTester(self )
@property
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'image_mean' ) )
self.assertTrue(hasattr(A_ , 'image_std' ) )
self.assertTrue(hasattr(A_ , 'do_normalize' ) )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'do_rescale' ) )
self.assertTrue(hasattr(A_ , 'do_pad' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1_333} )
self.assertEqual(image_processor.do_pad , A_ )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
# prepare image and target
UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
UpperCamelCase = json.loads(f.read() )
UpperCamelCase = {'image_id': 39_769, 'annotations': target}
# encode them
UpperCamelCase = DetaImageProcessor()
UpperCamelCase = image_processing(images=A_ , annotations=A_ , return_tensors='pt' )
# verify pixel values
UpperCamelCase = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape , A_ )
UpperCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , A_ , atol=1e-4 ) )
# verify area
UpperCamelCase = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , A_ ) )
# verify boxes
UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , A_ )
UpperCamelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , A_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , A_ ) )
# verify is_crowd
UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , A_ ) )
# verify class_labels
UpperCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , A_ ) )
# verify orig_size
UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , A_ ) )
# verify size
UpperCamelCase = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , A_ ) )
@slow
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
# prepare image, target and masks_path
UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
UpperCamelCase = json.loads(f.read() )
UpperCamelCase = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target}
UpperCamelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
UpperCamelCase = DetaImageProcessor(format='coco_panoptic' )
UpperCamelCase = image_processing(images=A_ , annotations=A_ , masks_path=A_ , return_tensors='pt' )
# verify pixel values
UpperCamelCase = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape , A_ )
UpperCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , A_ , atol=1e-4 ) )
# verify area
UpperCamelCase = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , A_ ) )
# verify boxes
UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , A_ )
UpperCamelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , A_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , A_ ) )
# verify is_crowd
UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , A_ ) )
# verify class_labels
UpperCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , A_ ) )
# verify masks
UpperCamelCase = 822_873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , A_ )
# verify orig_size
UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , A_ ) )
# verify size
UpperCamelCase = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , A_ ) )
| 3 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase : Union[str, Any] = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Dict = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_UpperCAmelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 3 | 1 |
def _a ( lowerCamelCase ):
lowerCamelCase : List[Any] = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(2_7))
print(perfect_cube(4))
| 681 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class A__ ( nn.Module):
def __init__( self , __magic_name__ = 1_6 , __magic_name__ = 8_8 , __magic_name__ = None , __magic_name__ = 1 , __magic_name__ = 0.0 , __magic_name__ = 3_2 , __magic_name__ = None , __magic_name__ = False , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "geglu" , __magic_name__ = None , ):
super().__init__()
lowerCamelCase : Any = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__magic_name__ , attention_head_dim=__magic_name__ , in_channels=__magic_name__ , num_layers=__magic_name__ , dropout=__magic_name__ , norm_num_groups=__magic_name__ , cross_attention_dim=__magic_name__ , attention_bias=__magic_name__ , sample_size=__magic_name__ , num_vector_embeds=__magic_name__ , activation_fn=__magic_name__ , num_embeds_ada_norm=__magic_name__ , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
lowerCamelCase : Any = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
lowerCamelCase : List[Any] = [7_7, 2_5_7]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
lowerCamelCase : Optional[int] = [1, 0]
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__ = True , ):
lowerCamelCase : List[Any] = hidden_states
lowerCamelCase : Dict = []
lowerCamelCase : List[Any] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
lowerCamelCase : Dict = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
lowerCamelCase : Optional[int] = self.transformer_index_for_condition[i]
lowerCamelCase : List[Any] = self.transformers[transformer_index](
__magic_name__ , encoder_hidden_states=__magic_name__ , timestep=__magic_name__ , cross_attention_kwargs=__magic_name__ , return_dict=__magic_name__ , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
lowerCamelCase : Any = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
lowerCamelCase : Dict = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=__magic_name__ )
| 681 | 1 |
"""simple docstring"""
import datasets
SCREAMING_SNAKE_CASE = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
SCREAMING_SNAKE_CASE = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
SCREAMING_SNAKE_CASE = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ )-> Optional[Any]:
"""simple docstring"""
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a ( datasets.Metric ):
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str )-> str:
"""simple docstring"""
return {"accuracy": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )}
| 556 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class __a ( _lowerCAmelCase ):
UpperCamelCase_ : Union[List[PIL.Image.Image], np.ndarray]
UpperCamelCase_ : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(""">=""", """0.0.12""")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class __a ( _lowerCAmelCase ):
UpperCamelCase_ : np.ndarray
UpperCamelCase_ : List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 556 | 1 |
"""simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[int]="attention" ) -> str:
'''simple docstring'''
lowercase = params[f'{prefix}/layers_{i}/{layer_name}/key/kernel']
lowercase = params[f'{prefix}/layers_{i}/{layer_name}/out/kernel']
lowercase = params[f'{prefix}/layers_{i}/{layer_name}/query/kernel']
lowercase = params[f'{prefix}/layers_{i}/{layer_name}/value/kernel']
return k, o, q, v
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Union[str, Any]=False ) -> str:
'''simple docstring'''
if split_mlp_wi:
lowercase = params[f'{prefix}/layers_{i}/mlp/wi_0/kernel']
lowercase = params[f'{prefix}/layers_{i}/mlp/wi_1/kernel']
lowercase = (wi_a, wi_a)
else:
lowercase = params[f'{prefix}/layers_{i}/mlp/wi/kernel']
lowercase = params[f'{prefix}/layers_{i}/mlp/wo/kernel']
return wi, wo
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :int , lowerCAmelCase__ :int ) -> Any:
'''simple docstring'''
return params[f'{prefix}/layers_{i}/{layer_name}/scale']
def UpperCAmelCase__ ( lowerCAmelCase__ :dict , *, lowerCAmelCase__ :int , lowerCAmelCase__ :bool ) -> List[str]:
'''simple docstring'''
lowercase = traverse_util.flatten_dict(variables["""target"""] )
lowercase = {"""/""".join(lowerCAmelCase__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowercase = """encoder/layers_0/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , lowerCAmelCase__ )
lowercase = collections.OrderedDict()
# Shared embeddings.
lowercase = old["""token_embedder/embedding"""]
# Encoder.
for i in range(lowerCAmelCase__ ):
# Block i, layer 0 (Self Attention).
lowercase = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" , """pre_attention_layer_norm""" )
lowercase , lowercase , lowercase , lowercase = tax_attention_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" , """attention""" )
lowercase = layer_norm
lowercase = k.T
lowercase = o.T
lowercase = q.T
lowercase = v.T
# Block i, layer 1 (MLP).
lowercase = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" , """pre_mlp_layer_norm""" )
lowercase , lowercase = tax_mlp_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" , lowerCAmelCase__ )
lowercase = layer_norm
if split_mlp_wi:
lowercase = wi[0].T
lowercase = wi[1].T
else:
lowercase = wi.T
lowercase = wo.T
lowercase = old[
"""encoder/relpos_bias/rel_embedding"""
].T
lowercase = old["""encoder/encoder_norm/scale"""]
if not is_encoder_only:
# Decoder.
for i in range(lowerCAmelCase__ ):
# Block i, layer 0 (Self Attention).
lowercase = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """pre_self_attention_layer_norm""" )
lowercase , lowercase , lowercase , lowercase = tax_attention_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """self_attention""" )
lowercase = layer_norm
lowercase = k.T
lowercase = o.T
lowercase = q.T
lowercase = v.T
# Block i, layer 1 (Cross Attention).
lowercase = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """pre_cross_attention_layer_norm""" )
lowercase , lowercase , lowercase , lowercase = tax_attention_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """encoder_decoder_attention""" )
lowercase = layer_norm
lowercase = k.T
lowercase = o.T
lowercase = q.T
lowercase = v.T
# Block i, layer 2 (MLP).
lowercase = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """pre_mlp_layer_norm""" )
lowercase , lowercase = tax_mlp_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , lowerCAmelCase__ )
lowercase = layer_norm
if split_mlp_wi:
lowercase = wi[0].T
lowercase = wi[1].T
else:
lowercase = wi.T
lowercase = wo.T
lowercase = old["""decoder/decoder_norm/scale"""]
lowercase = old[
"""decoder/relpos_bias/rel_embedding"""
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowercase = old["""decoder/logits_dense/kernel"""].T
return new
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :bool ) -> int:
'''simple docstring'''
lowercase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowercase = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowercase = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
lowercase = state_dict["""shared.weight"""]
return state_dict
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase = checkpoints.load_tax_checkpoint(lowerCAmelCase__ )
lowercase = convert_tax_to_pytorch(lowerCAmelCase__ , num_layers=config.num_layers , is_encoder_only=lowerCAmelCase__ )
lowercase = make_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :bool = False ) -> Tuple:
'''simple docstring'''
lowercase = TaConfig.from_json_file(lowerCAmelCase__ )
print(f'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
lowercase = TaEncoderModel(lowerCAmelCase__ )
else:
lowercase = TaForConditionalGeneration(lowerCAmelCase__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(lowerCAmelCase__ )
# Verify that we can load the checkpoint.
model.from_pretrained(lowerCAmelCase__ )
print("""Done""" )
if __name__ == "__main__":
__lowerCAmelCase : List[Any] =argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
__lowerCAmelCase : List[Any] =parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 359 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase : Tuple =logging.get_logger(__name__)
__lowerCAmelCase : Dict ={
"""ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class _A ( lowerCAmelCase ):
snake_case__ : Optional[int] = 'deta'
snake_case__ : Dict = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=900 , __lowerCAmelCase=2048 , __lowerCAmelCase=6 , __lowerCAmelCase=2048 , __lowerCAmelCase=8 , __lowerCAmelCase=6 , __lowerCAmelCase=1024 , __lowerCAmelCase=8 , __lowerCAmelCase=0.0 , __lowerCAmelCase=True , __lowerCAmelCase="relu" , __lowerCAmelCase=256 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1.0 , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase="sine" , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=4 , __lowerCAmelCase=True , __lowerCAmelCase=300 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=1 , __lowerCAmelCase=5 , __lowerCAmelCase=2 , __lowerCAmelCase=1 , __lowerCAmelCase=1 , __lowerCAmelCase=5 , __lowerCAmelCase=2 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.2_5 , **__lowerCAmelCase , ):
"""simple docstring"""
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowercase = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] )
else:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase = backbone_config.pop("""model_type""" )
lowercase = CONFIG_MAPPING[backbone_model_type]
lowercase = config_class.from_dict(__lowerCAmelCase )
lowercase = backbone_config
lowercase = num_queries
lowercase = max_position_embeddings
lowercase = d_model
lowercase = encoder_ffn_dim
lowercase = encoder_layers
lowercase = encoder_attention_heads
lowercase = decoder_ffn_dim
lowercase = decoder_layers
lowercase = decoder_attention_heads
lowercase = dropout
lowercase = attention_dropout
lowercase = activation_dropout
lowercase = activation_function
lowercase = init_std
lowercase = init_xavier_std
lowercase = encoder_layerdrop
lowercase = auxiliary_loss
lowercase = position_embedding_type
# deformable attributes
lowercase = num_feature_levels
lowercase = encoder_n_points
lowercase = decoder_n_points
lowercase = two_stage
lowercase = two_stage_num_proposals
lowercase = with_box_refine
lowercase = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
lowercase = class_cost
lowercase = bbox_cost
lowercase = giou_cost
# Loss coefficients
lowercase = mask_loss_coefficient
lowercase = dice_loss_coefficient
lowercase = bbox_loss_coefficient
lowercase = giou_loss_coefficient
lowercase = eos_coefficient
lowercase = focal_alpha
super().__init__(is_encoder_decoder=__lowerCAmelCase , **__lowerCAmelCase )
@property
def A__ ( self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def A__ ( self ):
"""simple docstring"""
return self.d_model
def A__ ( self ):
"""simple docstring"""
lowercase = copy.deepcopy(self.__dict__ )
lowercase = self.backbone_config.to_dict()
lowercase = self.__class__.model_type
return output
| 359 | 1 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
a : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase_ ( __A ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=7_6_8 ) -> Optional[int]:
'''simple docstring'''
super().__init__(UpperCamelCase__ )
__lowercase = proj_size
__lowercase = CLIPVisionModel(UpperCamelCase__ )
__lowercase = PaintByExampleMapper(UpperCamelCase__ )
__lowercase = nn.LayerNorm(config.hidden_size )
__lowercase = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
__lowercase = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def A ( self , snake_case_ , snake_case_=False ) -> Tuple:
'''simple docstring'''
__lowercase = self.model(pixel_values=UpperCamelCase__ )
__lowercase = clip_output.pooler_output
__lowercase = self.mapper(latent_states[:, None] )
__lowercase = self.final_layer_norm(UpperCamelCase__ )
__lowercase = self.proj_out(UpperCamelCase__ )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class lowerCamelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self , snake_case_ ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
__lowercase = (config.num_hidden_layers + 1) // 5
__lowercase = config.hidden_size
__lowercase = 1
__lowercase = nn.ModuleList(
[
BasicTransformerBlock(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , activation_fn='''gelu''' , attention_bias=UpperCamelCase__ )
for _ in range(UpperCamelCase__ )
] )
def A ( self , snake_case_ ) -> Union[str, Any]:
'''simple docstring'''
for block in self.blocks:
__lowercase = block(UpperCamelCase__ )
return hidden_states
| 709 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
a : Optional[int] = datasets.logging.get_logger(__name__)
a : Tuple = '''\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
'''
a : Union[str, Any] = '''\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''
a : Union[str, Any] = '''
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
\'scores\': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
'''
a : Tuple = {
'''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
'''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
'''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
'''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
'''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
'''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
'''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
'''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
'''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
'''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
'''simple docstring'''
def A ( self ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
def A ( self , snake_case_ ) -> List[str]:
'''simple docstring'''
if self.config_name == "default":
logger.warning(
'''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
'''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' )
__lowercase = '''bleurt-base-128'''
if self.config_name.lower() in CHECKPOINT_URLS:
__lowercase = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
__lowercase = self.config_name.upper()
else:
raise KeyError(
F'{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}' )
# download the model checkpoint specified by self.config_name and set up the scorer
__lowercase = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
__lowercase = score.BleurtScorer(os.path.join(snake_case_ , snake_case_ ) )
def A ( self , snake_case_ , snake_case_ ) -> Tuple:
'''simple docstring'''
__lowercase = self.scorer.score(references=snake_case_ , candidates=snake_case_ )
return {"scores": scores}
| 527 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : str = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = original_name.split("." )[0]
_lowerCamelCase : List[Any] = key.split("." )
_lowerCamelCase : Dict = int(key_list[key_list.index(_lowerCAmelCase ) - 2] )
_lowerCamelCase : Union[str, Any] = int(key_list[key_list.index(_lowerCAmelCase ) - 1] )
_lowerCamelCase : Optional[int] = orig_block_num - offset
_lowerCamelCase : Dict = key.replace(F'{orig_block_num}.{layer_num}.{original_name}' , F'block.{new_block_num}.{layer_num}.{new_name}' )
return key
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = OrderedDict()
_lowerCamelCase , _lowerCamelCase : Tuple = 0, 0
for key, value in state_dict.items():
if key.startswith("network" ):
_lowerCamelCase : List[str] = key.replace("network" , "poolformer.encoder" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("bias" ) and "patch_embed" not in key:
patch_emb_offset += 1
_lowerCamelCase : Dict = key[: key.find("proj" )]
_lowerCamelCase : int = key.replace(_lowerCAmelCase , F'patch_embeddings.{total_embed_found}.' )
_lowerCamelCase : Tuple = key.replace("proj" , "projection" )
if key.endswith("bias" ):
total_embed_found += 1
if "patch_embeddings" in key:
_lowerCamelCase : List[Any] = "poolformer.encoder." + key
if "mlp.fc1" in key:
_lowerCamelCase : Optional[int] = replace_key_with_offset(_lowerCAmelCase , _lowerCAmelCase , "mlp.fc1" , "output.conv1" )
if "mlp.fc2" in key:
_lowerCamelCase : str = replace_key_with_offset(_lowerCAmelCase , _lowerCAmelCase , "mlp.fc2" , "output.conv2" )
if "norm1" in key:
_lowerCamelCase : int = replace_key_with_offset(_lowerCAmelCase , _lowerCAmelCase , "norm1" , "before_norm" )
if "norm2" in key:
_lowerCamelCase : int = replace_key_with_offset(_lowerCAmelCase , _lowerCAmelCase , "norm2" , "after_norm" )
if "layer_scale_1" in key:
_lowerCamelCase : str = replace_key_with_offset(_lowerCAmelCase , _lowerCAmelCase , "layer_scale_1" , "layer_scale_1" )
if "layer_scale_2" in key:
_lowerCamelCase : Optional[int] = replace_key_with_offset(_lowerCAmelCase , _lowerCAmelCase , "layer_scale_2" , "layer_scale_2" )
if "head" in key:
_lowerCamelCase : Tuple = key.replace("head" , "classifier" )
_lowerCamelCase : Tuple = value
return new_state_dict
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : List[str] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return image
@torch.no_grad()
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Dict = PoolFormerConfig()
# set attributes based on model_name
_lowerCamelCase : Optional[int] = "huggingface/label-files"
_lowerCamelCase : Optional[Any] = model_name[-3:]
_lowerCamelCase : str = 1000
_lowerCamelCase : Union[str, Any] = "imagenet-1k-id2label.json"
_lowerCamelCase : Optional[Any] = (1, 1000)
# set config attributes
_lowerCamelCase : Union[str, Any] = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : List[str] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase : Tuple = idalabel
_lowerCamelCase : int = {v: k for k, v in idalabel.items()}
if size == "s12":
_lowerCamelCase : List[Any] = [2, 2, 6, 2]
_lowerCamelCase : Optional[int] = [64, 128, 320, 512]
_lowerCamelCase : Any = 4.0
_lowerCamelCase : int = 0.9
elif size == "s24":
_lowerCamelCase : List[str] = [4, 4, 12, 4]
_lowerCamelCase : Tuple = [64, 128, 320, 512]
_lowerCamelCase : Union[str, Any] = 4.0
_lowerCamelCase : Dict = 0.9
elif size == "s36":
_lowerCamelCase : List[str] = [6, 6, 18, 6]
_lowerCamelCase : int = [64, 128, 320, 512]
_lowerCamelCase : Optional[Any] = 4.0
_lowerCamelCase : Optional[int] = 1E-6
_lowerCamelCase : Union[str, Any] = 0.9
elif size == "m36":
_lowerCamelCase : Optional[Any] = [6, 6, 18, 6]
_lowerCamelCase : Dict = [96, 192, 384, 768]
_lowerCamelCase : Optional[Any] = 4.0
_lowerCamelCase : Union[str, Any] = 1E-6
_lowerCamelCase : Tuple = 0.9_5
elif size == "m48":
_lowerCamelCase : Optional[Any] = [8, 8, 24, 8]
_lowerCamelCase : Optional[Any] = [96, 192, 384, 768]
_lowerCamelCase : List[str] = 4.0
_lowerCamelCase : Union[str, Any] = 1E-6
_lowerCamelCase : str = 0.9_5
else:
raise ValueError(F'Size {size} not supported' )
# load image processor
_lowerCamelCase : Union[str, Any] = PoolFormerImageProcessor(crop_pct=_lowerCAmelCase )
# Prepare image
_lowerCamelCase : Dict = prepare_img()
_lowerCamelCase : List[str] = image_processor(images=_lowerCAmelCase , return_tensors="pt" ).pixel_values
logger.info(F'Converting model {model_name}...' )
# load original state dict
_lowerCamelCase : Any = torch.load(_lowerCAmelCase , map_location=torch.device("cpu" ) )
# rename keys
_lowerCamelCase : Dict = rename_keys(_lowerCAmelCase )
# create HuggingFace model and load state dict
_lowerCamelCase : Optional[Any] = PoolFormerForImageClassification(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
model.eval()
# Define image processor
_lowerCamelCase : Optional[Any] = PoolFormerImageProcessor(crop_pct=_lowerCAmelCase )
_lowerCamelCase : Optional[int] = image_processor(images=prepare_img() , return_tensors="pt" ).pixel_values
# forward pass
_lowerCamelCase : Union[str, Any] = model(_lowerCAmelCase )
_lowerCamelCase : Any = outputs.logits
# define expected logit slices for different models
if size == "s12":
_lowerCamelCase : Tuple = torch.tensor([-0.3_0_4_5, -0.6_7_5_8, -0.4_8_6_9] )
elif size == "s24":
_lowerCamelCase : Tuple = torch.tensor([0.4_4_0_2, -0.1_3_7_4, -0.8_0_4_5] )
elif size == "s36":
_lowerCamelCase : Tuple = torch.tensor([-0.6_0_8_0, -0.5_1_3_3, -0.5_8_9_8] )
elif size == "m36":
_lowerCamelCase : Dict = torch.tensor([0.3_9_5_2, 0.2_2_6_3, -1.2_6_6_8] )
elif size == "m48":
_lowerCamelCase : str = torch.tensor([0.1_1_6_7, -0.0_6_5_6, -0.3_4_2_3] )
else:
raise ValueError(F'Size {size} not supported' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , _lowerCAmelCase , atol=1E-2 )
# finally, save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path) | 44 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def UpperCAmelCase_ ( A = "https://www.worldometers.info/coronavirus" ):
'''simple docstring'''
_a : Union[str, Any] = BeautifulSoup(requests.get(A ).text , 'html.parser' )
_a : int = soup.findAll('h1' )
_a : Union[str, Any] = soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(A , A )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 120 | 0 |
'''simple docstring'''
def _A ( snake_case__ : int , snake_case__ : int ):
return numa ^ numa < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = (EulerDiscreteScheduler,)
_lowerCAmelCase = 1_0
def lowercase__ ( self , **lowerCamelCase ) -> Tuple:
"""simple docstring"""
snake_case__ : Any = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**lowerCamelCase )
return config
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCamelCase , beta_end=lowerCamelCase )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCamelCase )
def lowercase__ ( self ) -> str:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : List[Any] = self.scheduler_classes[0]
snake_case__ : Any = self.get_scheduler_config()
snake_case__ : int = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case__ : Dict = torch.manual_seed(0 )
snake_case__ : Any = self.dummy_model()
snake_case__ : str = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case__ : List[Any] = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : int = model(lowerCamelCase , lowerCamelCase )
snake_case__ : List[str] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : Optional[int] = output.prev_sample
snake_case__ : List[str] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : Tuple = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 10.0_807 ) < 1E-2
assert abs(result_mean.item() - 0.0_131 ) < 1E-3
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : Tuple = self.scheduler_classes[0]
snake_case__ : Optional[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
snake_case__ : int = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case__ : Optional[Any] = torch.manual_seed(0 )
snake_case__ : Optional[int] = self.dummy_model()
snake_case__ : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case__ : Optional[int] = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : List[str] = model(lowerCamelCase , lowerCamelCase )
snake_case__ : Union[str, Any] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : Union[str, Any] = output.prev_sample
snake_case__ : List[str] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : List[str] = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 0.0_002 ) < 1E-2
assert abs(result_mean.item() - 2.2_6_7_6E-0_6 ) < 1E-3
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : List[Any] = self.scheduler_classes[0]
snake_case__ : Optional[int] = self.get_scheduler_config()
snake_case__ : List[str] = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
snake_case__ : int = torch.manual_seed(0 )
snake_case__ : Optional[int] = self.dummy_model()
snake_case__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
snake_case__ : Tuple = sample.to(lowerCamelCase )
for t in scheduler.timesteps:
snake_case__ : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : str = model(lowerCamelCase , lowerCamelCase )
snake_case__ : int = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : int = output.prev_sample
snake_case__ : Union[str, Any] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : int = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 10.0_807 ) < 1E-2
assert abs(result_mean.item() - 0.0_131 ) < 1E-3
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : Dict = self.scheduler_classes[0]
snake_case__ : str = self.get_scheduler_config()
snake_case__ : List[Any] = scheduler_class(**lowerCamelCase , use_karras_sigmas=lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
snake_case__ : int = torch.manual_seed(0 )
snake_case__ : Dict = self.dummy_model()
snake_case__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
snake_case__ : Optional[Any] = sample.to(lowerCamelCase )
for t in scheduler.timesteps:
snake_case__ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : Optional[Any] = model(lowerCamelCase , lowerCamelCase )
snake_case__ : str = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : Optional[int] = output.prev_sample
snake_case__ : Dict = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1E-2
assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1E-3
| 694 | 1 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowercase__ =logging.get_logger(__name__)
class UpperCamelCase__ ( __a ):
def __init__(self : Tuple , *snake_case_ : str , **snake_case_ : List[Any] ):
warnings.warn(
'''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use BeitImageProcessor instead.''' , lowerCAmelCase__ , )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
| 521 |
import numpy
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : int , lowerCAmelCase__ : numpy.ndarray , lowerCAmelCase__ : numpy.ndarray ) -> None:
snake_case__ = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
snake_case__ = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
snake_case__ = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
snake_case__ = numpy.random.rand(3 , 1 )
# Real output values provided.
snake_case__ = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
snake_case__ = numpy.zeros(output_array.shape )
def UpperCAmelCase_ ( self : Dict ) -> numpy.ndarray:
snake_case__ = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
snake_case__ = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
snake_case__ = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def UpperCAmelCase_ ( self : List[Any] ) -> None:
snake_case__ = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
snake_case__ = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
snake_case__ = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def UpperCAmelCase_ ( self : List[Any] , lowerCAmelCase__ : numpy.ndarray , lowerCAmelCase__ : int , lowerCAmelCase__ : bool ) -> None:
for iteration in range(1 , iterations + 1 ):
snake_case__ = self.feedforward()
self.back_propagation()
if give_loss:
snake_case__ = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f'''Iteration {iteration} Loss: {loss}''' )
def UpperCAmelCase_ ( self : Optional[Any] , lowerCAmelCase__ : numpy.ndarray ) -> int:
snake_case__ = input_arr
snake_case__ = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
snake_case__ = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
snake_case__ = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def _lowercase ( __UpperCamelCase : numpy.ndarray ):
return 1 / (1 + numpy.exp(-value ))
def _lowercase ( __UpperCamelCase : numpy.ndarray ):
return (value) * (1 - (value))
def _lowercase ( ):
snake_case__ = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
snake_case__ = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
snake_case__ = TwoHiddenLayerNeuralNetwork(
input_array=__UpperCamelCase , output_array=__UpperCamelCase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=__UpperCamelCase , iterations=10 , give_loss=__UpperCamelCase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 214 | 0 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
__A = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[int]) ->None:
'''simple docstring'''
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead." , UpperCAmelCase_ , )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
| 437 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Dict =self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(UpperCAmelCase_ , "tf_padding"))
self.parent.assertTrue(hasattr(UpperCAmelCase_ , "depth_multiplier"))
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__(self : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any]=13 , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : Optional[Any]=32 , UpperCAmelCase_ : Any=0.25 , UpperCAmelCase_ : int=8 , UpperCAmelCase_ : Any=8 , UpperCAmelCase_ : Any=6 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Dict="relu6" , UpperCAmelCase_ : Optional[int]=1_280 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Tuple=0.02 , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[Any]=10 , UpperCAmelCase_ : Optional[int]=None , ) ->Dict:
'''simple docstring'''
lowerCamelCase__: Any =parent
lowerCamelCase__: Optional[Any] =batch_size
lowerCamelCase__: List[str] =num_channels
lowerCamelCase__: Dict =image_size
lowerCamelCase__: Tuple =depth_multiplier
lowerCamelCase__: Tuple =depth_divisible_by
lowerCamelCase__: List[str] =min_depth
lowerCamelCase__: List[str] =expand_ratio
lowerCamelCase__: Union[str, Any] =tf_padding
lowerCamelCase__: Optional[Any] =output_stride
lowerCamelCase__: Tuple =first_layer_is_expansion
lowerCamelCase__: Any =finegrained_output
lowerCamelCase__: Union[str, Any] =hidden_act
lowerCamelCase__: Union[str, Any] =last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
lowerCamelCase__: int =classifier_dropout_prob
lowerCamelCase__: List[str] =use_labels
lowerCamelCase__: Any =is_training
lowerCamelCase__: Dict =num_labels
lowerCamelCase__: Optional[Any] =initializer_range
lowerCamelCase__: List[Any] =scope
def SCREAMING_SNAKE_CASE_ (self : int) ->str:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowerCamelCase__: Dict =None
lowerCamelCase__: int =None
if self.use_labels:
lowerCamelCase__: List[str] =ids_tensor([self.batch_size] , self.num_labels)
lowerCamelCase__: List[Any] =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
lowerCamelCase__: Any =self.get_config()
return config, pixel_values, labels, pixel_labels
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Optional[int]:
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str]) ->int:
'''simple docstring'''
lowerCamelCase__: List[str] =MobileNetVaModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowerCamelCase__: Dict =model(UpperCAmelCase_)
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: int =self.num_labels
lowerCamelCase__: Optional[int] =MobileNetVaForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowerCamelCase__: Tuple =model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =self.num_labels
lowerCamelCase__: List[str] =MobileNetVaForSemanticSegmentation(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowerCamelCase__: Any =model(UpperCAmelCase_)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowerCamelCase__: List[str] =model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Any =self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Tuple =config_and_inputs
lowerCamelCase__: Tuple ={"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase_ = (
{
"feature-extraction": MobileNetVaModel,
"image-classification": MobileNetVaForImageClassification,
"image-segmentation": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =MobileNetVaModelTester(self)
lowerCamelCase__: Union[str, Any] =MobileNetVaConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Any:
'''simple docstring'''
pass
@unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
def SCREAMING_SNAKE_CASE_ (self : int) ->str:
'''simple docstring'''
pass
@unittest.skip(reason="MobileNetV2 does not output attentions")
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->List[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ (self : str) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: Tuple =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__: Optional[Any] =model_class(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__: Tuple =[*signature.parameters.keys()]
lowerCamelCase__: Union[str, Any] =["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->Any:
'''simple docstring'''
lowerCamelCase__: Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Any:
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str):
lowerCamelCase__: List[str] =model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
lowerCamelCase__: Any =model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
lowerCamelCase__: Optional[Any] =outputs.hidden_states
lowerCamelCase__: List[str] =16
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
lowerCamelCase__ , lowerCamelCase__: Dict =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__: Union[str, Any] =True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__: Optional[int] =True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Dict:
'''simple docstring'''
lowerCamelCase__: int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Union[str, Any]:
'''simple docstring'''
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__: Optional[int] =MobileNetVaModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def lowerCAmelCase_ ( ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: List[Any] =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[str]:
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(UpperCAmelCase_)
lowerCamelCase__: Dict =self.default_image_processor
lowerCamelCase__: str =prepare_img()
lowerCamelCase__: int =image_processor(images=UpperCAmelCase_ , return_tensors="pt").to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
lowerCamelCase__: str =model(**UpperCAmelCase_)
# verify the logits
lowerCamelCase__: Optional[Any] =torch.Size((1, 1_001))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
lowerCamelCase__: List[str] =torch.tensor([0.2445, -1.1993, 0.1905]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4))
@slow
def SCREAMING_SNAKE_CASE_ (self : int) ->List[str]:
'''simple docstring'''
lowerCamelCase__: List[Any] =MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
lowerCamelCase__: str =model.to(UpperCAmelCase_)
lowerCamelCase__: List[Any] =MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
lowerCamelCase__: int =prepare_img()
lowerCamelCase__: int =image_processor(images=UpperCAmelCase_ , return_tensors="pt").to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
lowerCamelCase__: str =model(**UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =outputs.logits
# verify the logits
lowerCamelCase__: Optional[int] =torch.Size((1, 21, 65, 65))
self.assertEqual(logits.shape , UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=UpperCAmelCase_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCAmelCase_ , atol=1E-4))
| 437 | 1 |
'''simple docstring'''
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def lowerCAmelCase_ ( __A : Optional[int] ):
'''simple docstring'''
snake_case: Union[str, Any] = [
'decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(__A , __A )
def lowerCAmelCase_ ( __A : Optional[int] ):
'''simple docstring'''
snake_case , snake_case: Union[str, Any] = emb.weight.shape
snake_case: int = nn.Linear(__A , __A , bias=__A )
snake_case: Tuple = emb.weight.data
return lin_layer
def lowerCAmelCase_ ( __A : Tuple ):
'''simple docstring'''
snake_case: Dict = torch.load(__A , map_location='cpu' )
snake_case: List[str] = Namespace(**checkpoint['cfg']['model'] )
snake_case: int = checkpoint['model']
remove_ignore_keys_(__A )
snake_case: List[Any] = state_dict['decoder.embed_tokens.weight'].shape[0]
snake_case: Dict = {key.replace('decoder' , 'model' ): val for key, val in state_dict.items()}
snake_case: Optional[Any] = XGLMConfig(
vocab_size=__A , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='gelu' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
snake_case: str = XGLMForCausalLM(__A )
snake_case: List[str] = model.load_state_dict(__A , strict=__A )
print(__A )
snake_case: Optional[Any] = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path) | 329 |
'''simple docstring'''
def lowerCAmelCase_ ( __A : int = 50 ):
'''simple docstring'''
snake_case: Dict = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'{solution() = }') | 329 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class A_ :
'''simple docstring'''
UpperCAmelCase_ : Any = 42
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Union[str, Any] = None
def UpperCamelCase( ):
UpperCAmelCase : int = Node(1 )
UpperCAmelCase : str = Node(2 )
UpperCAmelCase : int = Node(3 )
UpperCAmelCase : Any = Node(4 )
UpperCAmelCase : Dict = Node(5 )
return tree
def UpperCamelCase( UpperCAmelCase_ ):
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def UpperCamelCase( UpperCAmelCase_ ):
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def UpperCamelCase( UpperCAmelCase_ ):
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def UpperCamelCase( UpperCAmelCase_ ):
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : list[Any] = []
if root is None:
return output
UpperCAmelCase : Optional[Any] = deque([root] )
while process_queue:
UpperCAmelCase : List[Any] = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : list[Any] = []
def populate_output(UpperCAmelCase_ , UpperCAmelCase_ ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(_A , _A )
return output
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : list[Any] = []
def populate_output(UpperCAmelCase_ , UpperCAmelCase_ ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(_A , _A )
return output
def UpperCamelCase( UpperCAmelCase_ ):
if root is None:
return []
UpperCAmelCase : list[Sequence[Node | None]] = []
UpperCAmelCase : Optional[Any] = 0
UpperCAmelCase : int = height(_A )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(_A , _A ) )
UpperCAmelCase : str = 1
else:
output.append(get_nodes_from_right_to_left(_A , _A ) )
UpperCAmelCase : int = 0
return output
def UpperCamelCase( ): # Main function for testing.
UpperCAmelCase : str = make_tree()
print(F"""In-order Traversal: {inorder(_A )}""" )
print(F"""Pre-order Traversal: {preorder(_A )}""" )
print(F"""Post-order Traversal: {postorder(_A )}""" , '\n' )
print(F"""Height of Tree: {height(_A )}""" , '\n' )
print('Complete Level Order Traversal: ' )
print(level_order(_A ) , '\n' )
print('Level-wise order Traversal: ' )
for level in range(1 , height(_A ) + 1 ):
print(F"""Level {level}:""" , get_nodes_from_left_to_right(_A , level=_A ) )
print('\nZigZag order Traversal: ' )
print(zigzag(_A ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 705 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = """whisper"""
UpperCAmelCase_ : Tuple = ["""past_key_values"""]
UpperCAmelCase_ : Union[str, Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : str , lowercase_ : Any=51_865 , lowercase_ : List[Any]=80 , lowercase_ : int=6 , lowercase_ : Dict=4 , lowercase_ : List[Any]=6 , lowercase_ : Any=4 , lowercase_ : Tuple=1_536 , lowercase_ : Tuple=1_536 , lowercase_ : Tuple=0.0 , lowercase_ : Optional[int]=0.0 , lowercase_ : List[Any]=50_257 , lowercase_ : Optional[int]=True , lowercase_ : Any=True , lowercase_ : str="gelu" , lowercase_ : List[str]=256 , lowercase_ : str=0.0 , lowercase_ : Any=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : Dict=0.02 , lowercase_ : Optional[int]=False , lowercase_ : Union[str, Any]=1_500 , lowercase_ : List[Any]=448 , lowercase_ : int=50_256 , lowercase_ : Union[str, Any]=50_256 , lowercase_ : List[Any]=50_256 , lowercase_ : Tuple=None , lowercase_ : Optional[Any]=[220, 50_256] , lowercase_ : Tuple=False , lowercase_ : str=256 , lowercase_ : Optional[Any]=False , lowercase_ : List[Any]=0.05 , lowercase_ : Any=10 , lowercase_ : Optional[Any]=2 , lowercase_ : Optional[Any]=0.0 , lowercase_ : Optional[int]=10 , lowercase_ : int=0 , lowercase_ : Optional[int]=7 , **lowercase_ : Union[str, Any] , ) -> List[str]:
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : Any = num_mel_bins
UpperCAmelCase : List[Any] = d_model
UpperCAmelCase : int = encoder_layers
UpperCAmelCase : str = encoder_attention_heads
UpperCAmelCase : Tuple = decoder_layers
UpperCAmelCase : Any = decoder_attention_heads
UpperCAmelCase : Tuple = decoder_ffn_dim
UpperCAmelCase : List[str] = encoder_ffn_dim
UpperCAmelCase : int = dropout
UpperCAmelCase : int = attention_dropout
UpperCAmelCase : List[Any] = activation_dropout
UpperCAmelCase : Tuple = activation_function
UpperCAmelCase : Union[str, Any] = init_std
UpperCAmelCase : Dict = encoder_layerdrop
UpperCAmelCase : str = decoder_layerdrop
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : int = encoder_layers
UpperCAmelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase : Tuple = max_source_positions
UpperCAmelCase : List[Any] = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase : Optional[int] = classifier_proj_size
UpperCAmelCase : List[Any] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase : Optional[Any] = apply_spec_augment
UpperCAmelCase : Optional[Any] = mask_time_prob
UpperCAmelCase : Optional[Any] = mask_time_length
UpperCAmelCase : str = mask_time_min_masks
UpperCAmelCase : List[str] = mask_feature_prob
UpperCAmelCase : Tuple = mask_feature_length
UpperCAmelCase : Optional[int] = mask_feature_min_masks
UpperCAmelCase : str = median_filter_width
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , suppress_tokens=lowercase_ , begin_suppress_tokens=lowercase_ , **lowercase_ , )
class A_ ( _snake_case ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
UpperCAmelCase : Optional[int] = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
if self.use_past:
UpperCAmelCase : int = {0: 'batch'}
else:
UpperCAmelCase : List[str] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowercase_ , direction='inputs' )
return common_inputs
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional["TensorType"] = None , lowercase_ : int = 22_050 , lowercase_ : float = 5.0 , lowercase_ : int = 220 , ) -> Mapping[str, Any]:
UpperCAmelCase : Tuple = OrderedDict()
UpperCAmelCase : Tuple = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=lowercase_ , framework=lowercase_ , sampling_rate=lowercase_ , time_duration=lowercase_ , frequency=lowercase_ , )
UpperCAmelCase : Optional[Any] = encoder_inputs['input_features'].shape[2]
UpperCAmelCase : Tuple = encoder_sequence_length // 2 if self.use_past else seq_length
UpperCAmelCase : Optional[int] = super().generate_dummy_inputs(
preprocessor.tokenizer , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase : Dict = encoder_inputs.pop('input_features' )
UpperCAmelCase : List[str] = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
UpperCAmelCase : Union[str, Any] = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def UpperCAmelCase_ ( self : Dict ) -> float:
return 1E-3
| 695 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __UpperCAmelCase :
def __init__( self , _lowerCamelCase , _lowerCamelCase=2 , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=10 , _lowerCamelCase=3 , _lowerCamelCase=32 * 4 , _lowerCamelCase=32 * 6 , _lowerCamelCase=4 , _lowerCamelCase=32 , ):
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_auxiliary_loss
lowerCAmelCase_ = num_queries
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = min_size
lowerCAmelCase_ = max_size
lowerCAmelCase_ = num_labels
lowerCAmelCase_ = mask_feature_size
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_lowerCamelCase )
lowerCAmelCase_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_lowerCamelCase )
lowerCAmelCase_ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_lowerCamelCase ) > 0.5
).float()
lowerCAmelCase_ = (torch.rand((self.batch_size, self.num_labels) , device=_lowerCamelCase ) > 0.5).long()
lowerCAmelCase_ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCAmelCase_ ( self ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = self.prepare_config_and_inputs()
lowerCAmelCase_ = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ):
lowerCAmelCase_ = output.encoder_hidden_states
lowerCAmelCase_ = output.pixel_decoder_hidden_states
lowerCAmelCase_ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_lowerCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowerCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowerCamelCase ) , config.decoder_config.decoder_layers )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
with torch.no_grad():
lowerCAmelCase_ = MaskFormerModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowerCAmelCase_ = model(pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase )
lowerCAmelCase_ = model(_lowerCamelCase , output_hidden_states=_lowerCamelCase )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCAmelCase_ = MaskFormerForInstanceSegmentation(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
def comm_check_on_output(_lowerCamelCase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowerCAmelCase_ = model(pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase )
lowerCAmelCase_ = model(_lowerCamelCase )
comm_check_on_output(_lowerCamelCase )
lowerCAmelCase_ = model(
pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase )
comm_check_on_output(_lowerCamelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __UpperCAmelCase ( __a , __a , unittest.TestCase ):
__A : List[str] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
__A : Optional[int] = (
{'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
__A : List[Any] = False
__A : Optional[int] = False
__A : str = False
__A : Optional[int] = False
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = MaskFormerModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def UpperCAmelCase_ ( self ):
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ ,lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_lowerCamelCase , **_lowerCamelCase , output_hidden_states=_lowerCamelCase )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_lowerCamelCase )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def UpperCAmelCase_ ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ ,lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = model_class(_lowerCamelCase )
lowerCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ = [*signature.parameters.keys()]
lowerCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
@slow
def UpperCAmelCase_ ( self ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
lowerCAmelCase_ = MaskFormerModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = (self.model_tester.min_size,) * 2
lowerCAmelCase_ = {
'''pixel_values''': torch.randn((2, 3, *size) , device=_lowerCamelCase ),
'''mask_labels''': torch.randn((2, 10, *size) , device=_lowerCamelCase ),
'''class_labels''': torch.zeros(2 , 10 , device=_lowerCamelCase ).long(),
}
lowerCAmelCase_ = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_lowerCamelCase )
lowerCAmelCase_ = model(**_lowerCamelCase )
self.assertTrue(outputs.loss is not None )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ ,lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_lowerCamelCase , **_lowerCamelCase , output_hidden_states=_lowerCamelCase )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ ,lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = model_class(_lowerCamelCase ).to(_lowerCamelCase )
lowerCAmelCase_ = model(**_lowerCamelCase , output_attentions=_lowerCamelCase )
self.assertTrue(outputs.attentions is not None )
def UpperCAmelCase_ ( self ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
lowerCAmelCase_ = self.all_model_classes[1]
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase_ = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
lowerCAmelCase_ = model(_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase ).loss
loss.backward()
def UpperCAmelCase_ ( self ):
# only MaskFormerForInstanceSegmentation has the loss
lowerCAmelCase_ = self.all_model_classes[1]
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
lowerCAmelCase_ = model(_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase )
lowerCAmelCase_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCAmelCase_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
lowerCAmelCase_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCAmelCase_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_lowerCamelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
A_ : str =1e-4
def snake_case_ ( ) -> List[str]:
lowerCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_vision
@slow
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase_ ( self ):
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(_lowerCamelCase )
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
lowerCAmelCase_ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCamelCase , (1, 3, 800, 1088) )
with torch.no_grad():
lowerCAmelCase_ = model(**_lowerCamelCase )
lowerCAmelCase_ = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(_lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
lowerCAmelCase_ = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(_lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
lowerCAmelCase_ = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(_lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(_lowerCamelCase )
.eval()
)
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
lowerCAmelCase_ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCamelCase , (1, 3, 800, 1088) )
with torch.no_grad():
lowerCAmelCase_ = model(**_lowerCamelCase )
# masks_queries_logits
lowerCAmelCase_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCAmelCase_ = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
lowerCAmelCase_ = torch.tensor(_lowerCamelCase ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
# class_queries_logits
lowerCAmelCase_ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase_ = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(_lowerCamelCase )
.eval()
)
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
lowerCAmelCase_ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCamelCase , (1, 3, 800, 1088) )
with torch.no_grad():
lowerCAmelCase_ = model(**_lowerCamelCase )
# masks_queries_logits
lowerCAmelCase_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCAmelCase_ = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
lowerCAmelCase_ = torch.tensor(_lowerCamelCase ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
# class_queries_logits
lowerCAmelCase_ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase_ = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(_lowerCamelCase )
.eval()
)
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
lowerCAmelCase_ = inputs['''pixel_values'''].to(_lowerCamelCase )
lowerCAmelCase_ = [el.to(_lowerCamelCase ) for el in inputs['''mask_labels''']]
lowerCAmelCase_ = [el.to(_lowerCamelCase ) for el in inputs['''class_labels''']]
with torch.no_grad():
lowerCAmelCase_ = model(**_lowerCamelCase )
self.assertTrue(outputs.loss is not None )
| 274 | '''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def snake_case_ ( __snake_case : Tuple=None , __snake_case : int=None) -> int:
return field(default_factory=lambda: default , metadata=__snake_case)
@dataclass
class __UpperCAmelCase :
__A : str = field(
metadata={'help': 'The csv file to plot.'} , )
__A : bool = field(
default=__a , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , )
__A : bool = field(
default=__a , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , )
__A : bool = field(
default=__a , metadata={'help': 'Disable logarithmic scale when plotting'} , )
__A : bool = field(
default=__a , metadata={
'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
} , )
__A : Optional[str] = field(
default=__a , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , )
__A : Optional[List[str]] = list_field(
default=__a , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def snake_case_ ( __snake_case : Optional[Any]) -> Dict:
try:
int(__snake_case)
return True
except ValueError:
return False
def snake_case_ ( __snake_case : Dict) -> int:
try:
float(__snake_case)
return True
except ValueError:
return False
class __UpperCAmelCase :
def __init__( self , _lowerCamelCase ):
lowerCAmelCase_ = args
lowerCAmelCase_ = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='''''' ) as csv_file:
lowerCAmelCase_ = csv.DictReader(_lowerCamelCase )
for row in reader:
lowerCAmelCase_ = row['''model''']
self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) )
self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) )
if can_convert_to_int(row['''result'''] ):
# value is not None
lowerCAmelCase_ = int(row['''result'''] )
elif can_convert_to_float(row['''result'''] ):
# value is not None
lowerCAmelCase_ = float(row['''result'''] )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ ,lowerCAmelCase_ = plt.subplots()
lowerCAmelCase_ = '''Time usage''' if self.args.is_time else '''Memory usage'''
lowerCAmelCase_ = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference'''
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('''log''' )
ax.set_yscale('''log''' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
lowerCAmelCase_ = sorted(set(self.result_dict[model_name]['''bsz'''] ) )
lowerCAmelCase_ = sorted(set(self.result_dict[model_name]['''seq_len'''] ) )
lowerCAmelCase_ = self.result_dict[model_name]['''result''']
((lowerCAmelCase_) ,(lowerCAmelCase_)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
lowerCAmelCase_ = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
lowerCAmelCase_ = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=_lowerCamelCase , )
else:
lowerCAmelCase_ = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((lowerCAmelCase_) ,(lowerCAmelCase_)) = (
('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''')
)
lowerCAmelCase_ = np.asarray(_lowerCamelCase , _lowerCamelCase )[: len(_lowerCamelCase )]
plt.scatter(
_lowerCamelCase , _lowerCamelCase , label=F'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(_lowerCamelCase , _lowerCamelCase , '''--''' )
title_str += F''' {label_model_name} vs.'''
lowerCAmelCase_ = title_str[:-4]
lowerCAmelCase_ = '''Time in s''' if self.args.is_time else '''Memory in MB'''
# plot
plt.title(_lowerCamelCase )
plt.xlabel(_lowerCamelCase )
plt.ylabel(_lowerCamelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def snake_case_ ( ) -> Tuple:
lowerCAmelCase_ = HfArgumentParser(__snake_case)
lowerCAmelCase_ = parser.parse_args_into_dataclasses()[0]
lowerCAmelCase_ = Plot(args=__snake_case)
plot.plot()
if __name__ == "__main__":
main()
| 274 | 1 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __magic_name__ ( lowercase_ ):
"""simple docstring"""
def __init__( self , a__ , a__ = None , a__ = None , a__ = True , a__ = None , a__ = False , a__ = None , a__ = True , a__ = "arrow" , **a__ , ):
super().__init__(
split=a__ , features=a__ , cache_dir=a__ , keep_in_memory=a__ , streaming=a__ , **a__ , )
_lowerCamelCase = load_from_cache_file
_lowerCamelCase = file_format
_lowerCamelCase = Spark(
df=a__ , features=a__ , cache_dir=a__ , working_dir=a__ , **a__ , )
def _UpperCAmelCase ( self ):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
_lowerCamelCase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=a__ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 710 |
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger("transformers.models.speecht5")
_UpperCAmelCase = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
_UpperCAmelCase = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
_UpperCAmelCase = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
_UpperCAmelCase = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
_UpperCAmelCase = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
_UpperCAmelCase = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
_UpperCAmelCase = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
_UpperCAmelCase = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
_UpperCAmelCase = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
_UpperCAmelCase = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
_UpperCAmelCase = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
_UpperCAmelCase = []
_UpperCAmelCase = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
_UpperCAmelCase = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
_UpperCAmelCase = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
_UpperCAmelCase = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def _lowerCamelCase ( _a , _a , _a , _a , _a ):
"""simple docstring"""
for attribute in key.split('''.''' ):
_lowerCamelCase = getattr(_a , _a )
if weight_type is not None:
_lowerCamelCase = getattr(_a , _a ).shape
else:
_lowerCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
_lowerCamelCase = value
elif weight_type == "weight_g":
_lowerCamelCase = value
elif weight_type == "weight_v":
_lowerCamelCase = value
elif weight_type == "bias":
_lowerCamelCase = value
elif weight_type == "running_mean":
_lowerCamelCase = value
elif weight_type == "running_var":
_lowerCamelCase = value
elif weight_type == "num_batches_tracked":
_lowerCamelCase = value
else:
_lowerCamelCase = value
logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def _lowerCamelCase ( _a , _a ):
"""simple docstring"""
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
_lowerCamelCase , _lowerCamelCase = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def _lowerCamelCase ( _a , _a , _a ):
"""simple docstring"""
_lowerCamelCase = []
if task == "s2t":
_lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder
_lowerCamelCase = MAPPING_S2T
_lowerCamelCase = IGNORE_KEYS_S2T
elif task == "t2s":
_lowerCamelCase = None
_lowerCamelCase = MAPPING_T2S
_lowerCamelCase = IGNORE_KEYS_T2S
elif task == "s2s":
_lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder
_lowerCamelCase = MAPPING_S2S
_lowerCamelCase = IGNORE_KEYS_S2S
else:
raise ValueError(F'''Unsupported task: {task}''' )
for name, value in fairseq_dict.items():
if should_ignore(_a , _a ):
logger.info(F'''{name} was ignored''' )
continue
_lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
_a , _a , _a , _a , hf_model.config.feat_extract_norm == '''group''' , )
_lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
_lowerCamelCase , _lowerCamelCase = key.split('''.*.''' )
if prefix in name and suffix in name:
_lowerCamelCase = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
_lowerCamelCase = True
if "*" in mapped_key:
_lowerCamelCase = name.split(_a )[0].split('''.''' )[-2]
_lowerCamelCase = mapped_key.replace('''*''' , _a )
if "weight_g" in name:
_lowerCamelCase = '''weight_g'''
elif "weight_v" in name:
_lowerCamelCase = '''weight_v'''
elif "bias" in name:
_lowerCamelCase = '''bias'''
elif "weight" in name:
_lowerCamelCase = '''weight'''
elif "running_mean" in name:
_lowerCamelCase = '''running_mean'''
elif "running_var" in name:
_lowerCamelCase = '''running_var'''
elif "num_batches_tracked" in name:
_lowerCamelCase = '''num_batches_tracked'''
else:
_lowerCamelCase = None
set_recursively(_a , _a , _a , _a , _a )
continue
if not is_used:
unused_weights.append(_a )
logger.warning(F'''Unused weights: {unused_weights}''' )
def _lowerCamelCase ( _a , _a , _a , _a , _a ):
"""simple docstring"""
_lowerCamelCase = full_name.split('''conv_layers.''' )[-1]
_lowerCamelCase = name.split('''.''' )
_lowerCamelCase = int(items[0] )
_lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
_lowerCamelCase = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
_lowerCamelCase = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
_lowerCamelCase = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
_lowerCamelCase = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_a )
@torch.no_grad()
def _lowerCamelCase ( _a , _a , _a , _a=None , _a=None , _a=None , ):
"""simple docstring"""
if config_path is not None:
_lowerCamelCase = SpeechTaConfig.from_pretrained(_a )
else:
_lowerCamelCase = SpeechTaConfig()
if task == "s2t":
_lowerCamelCase = config.max_text_positions
_lowerCamelCase = SpeechTaForSpeechToText(_a )
elif task == "t2s":
_lowerCamelCase = 1_8_7_6
_lowerCamelCase = 6_0_0
_lowerCamelCase = config.max_speech_positions
_lowerCamelCase = SpeechTaForTextToSpeech(_a )
elif task == "s2s":
_lowerCamelCase = 1_8_7_6
_lowerCamelCase = config.max_speech_positions
_lowerCamelCase = SpeechTaForSpeechToSpeech(_a )
else:
raise ValueError(F'''Unknown task name: {task}''' )
if vocab_path:
_lowerCamelCase = SpeechTaTokenizer(_a , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
_lowerCamelCase = AddedToken('''<mask>''' , lstrip=_a , rstrip=_a )
_lowerCamelCase = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
_lowerCamelCase = SpeechTaFeatureExtractor()
_lowerCamelCase = SpeechTaProcessor(tokenizer=_a , feature_extractor=_a )
processor.save_pretrained(_a )
_lowerCamelCase = torch.load(_a )
recursively_load_weights(fairseq_checkpoint['''model'''] , _a , _a )
model.save_pretrained(_a )
if repo_id:
print('''Pushing to the hub...''' )
processor.push_to_hub(_a )
model.push_to_hub(_a )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
_UpperCAmelCase = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 297 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Any = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class A_ ( _a ):
lowerCAmelCase__ = 'open-llama'
def __init__( self: List[Any] ,__lowerCAmelCase: int=100_000 ,__lowerCAmelCase: Optional[int]=4_096 ,__lowerCAmelCase: Optional[int]=11_008 ,__lowerCAmelCase: Tuple=32 ,__lowerCAmelCase: Union[str, Any]=32 ,__lowerCAmelCase: int="silu" ,__lowerCAmelCase: Dict=2_048 ,__lowerCAmelCase: Dict=0.02 ,__lowerCAmelCase: Any=1e-6 ,__lowerCAmelCase: Optional[Any]=True ,__lowerCAmelCase: Optional[Any]=0 ,__lowerCAmelCase: List[Any]=1 ,__lowerCAmelCase: Optional[Any]=2 ,__lowerCAmelCase: int=False ,__lowerCAmelCase: Optional[Any]=True ,__lowerCAmelCase: Any=0.1 ,__lowerCAmelCase: Optional[int]=0.1 ,__lowerCAmelCase: str=True ,__lowerCAmelCase: Any=True ,__lowerCAmelCase: Dict=None ,**__lowerCAmelCase: Union[str, Any] ,):
'''simple docstring'''
_lowerCamelCase : str = vocab_size
_lowerCamelCase : Optional[int] = max_position_embeddings
_lowerCamelCase : Any = hidden_size
_lowerCamelCase : List[str] = intermediate_size
_lowerCamelCase : List[Any] = num_hidden_layers
_lowerCamelCase : List[Any] = num_attention_heads
_lowerCamelCase : Tuple = hidden_act
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : Any = rms_norm_eps
_lowerCamelCase : Optional[Any] = use_cache
_lowerCamelCase : str = kwargs.pop(
"use_memorry_efficient_attention" ,__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = hidden_dropout_prob
_lowerCamelCase : List[str] = attention_dropout_prob
_lowerCamelCase : Optional[int] = use_stable_embedding
_lowerCamelCase : List[Any] = shared_input_output_embedding
_lowerCamelCase : Tuple = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__lowerCAmelCase ,bos_token_id=__lowerCAmelCase ,eos_token_id=__lowerCAmelCase ,tie_word_embeddings=__lowerCAmelCase ,**__lowerCAmelCase ,)
def _lowercase ( self: List[str] ):
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,__lowerCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
F"""got {self.rope_scaling}""" )
_lowerCamelCase : Optional[Any] = self.rope_scaling.get("type" ,__lowerCAmelCase )
_lowerCamelCase : int = self.rope_scaling.get("factor" ,__lowerCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(__lowerCAmelCase ,__lowerCAmelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" ) | 46 |
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__lowerCAmelCase : int = TypeVar('''KT''')
__lowerCAmelCase : Union[str, Any] = TypeVar('''VT''')
class _lowerCAmelCase ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , _lowercase = "root" , _lowercase = None ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = key
snake_case_ : Tuple = value
snake_case_ : list[Node[KT, VT]] = []
def __repr__( self ) -> str:
'''simple docstring'''
return f'Node({self.key}: {self.value})'
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return len(self.forward )
class _lowerCAmelCase ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , _lowercase = 0.5 , _lowercase = 1_6 ) -> int:
'''simple docstring'''
snake_case_ : Node[KT, VT] = Node[KT, VT]()
snake_case_ : Union[str, Any] = 0
snake_case_ : Optional[int] = p
snake_case_ : Any = max_level
def __str__( self ) -> str:
'''simple docstring'''
snake_case_ : str = list(self )
if len(_lowercase ) == 0:
return f'SkipList(level={self.level})'
snake_case_ : List[Any] = max((len(str(_lowercase ) ) for item in items) , default=4 )
snake_case_ : str = max(_lowercase , 4 ) + 4
snake_case_ : Union[str, Any] = self.head
snake_case_ : Dict = []
snake_case_ : List[str] = node.forward.copy()
lines.append(f'[{node.key}]'.ljust(_lowercase , """-""" ) + """* """ * len(_lowercase ) )
lines.append(""" """ * label_size + """| """ * len(_lowercase ) )
while len(node.forward ) != 0:
snake_case_ : Optional[Any] = node.forward[0]
lines.append(
f'[{node.key}]'.ljust(_lowercase , """-""" )
+ """ """.join(str(n.key ) if n.key == node.key else """|""" for n in forwards ) )
lines.append(""" """ * label_size + """| """ * len(_lowercase ) )
snake_case_ : List[str] = node.forward
lines.append("""None""".ljust(_lowercase ) + """* """ * len(_lowercase ) )
return f'SkipList(level={self.level})\n' + "\n".join(_lowercase )
def __iter__( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
snake_case_ : Dict = node.forward[0]
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def UpperCAmelCase__ ( self , _lowercase ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : int = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
snake_case_ : List[Any] = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(_lowercase )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
for i, update_node in enumerate(_lowercase ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
snake_case_ : List[str] = node.forward[i]
else:
snake_case_ : Tuple = update_node.forward[:i]
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> str:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
snake_case_ : List[Any] = value
else:
snake_case_ : Optional[int] = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , _lowercase ):
update_vector.append(self.head )
snake_case_ : Any = level
snake_case_ : Optional[int] = Node(_lowercase , _lowercase )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(_lowercase )
else:
snake_case_ : Optional[Any] = new_node
def UpperCAmelCase__ ( self , _lowercase ) -> VT | None:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
return node.value
return None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = SkipList()
skip_list.insert("""Key1""" , 3 )
skip_list.insert("""Key2""" , 1_2 )
skip_list.insert("""Key3""" , 4_1 )
skip_list.insert("""Key4""" , -1_9 )
snake_case_ : Optional[int] = skip_list.head
snake_case_ : List[Any] = {}
while node.level != 0:
snake_case_ : List[str] = node.forward[0]
snake_case_ : Union[str, Any] = node.value
assert len(__UpperCamelCase ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 1_2
assert all_values["Key3"] == 4_1
assert all_values["Key4"] == -1_9
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[int] = SkipList()
skip_list.insert("""Key1""" , 1_0 )
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""Key5""" , 7 )
skip_list.insert("""Key7""" , 1_0 )
skip_list.insert("""Key10""" , 5 )
skip_list.insert("""Key7""" , 7 )
skip_list.insert("""Key5""" , 5 )
skip_list.insert("""Key10""" , 1_0 )
snake_case_ : str = skip_list.head
snake_case_ : str = {}
while node.level != 0:
snake_case_ : Optional[Any] = node.forward[0]
snake_case_ : int = node.value
if len(__UpperCamelCase ) != 4:
print()
assert len(__UpperCamelCase ) == 4
assert all_values["Key1"] == 1_2
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 1_0
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : str = SkipList()
assert skip_list.find("""Some key""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[Any] = SkipList()
skip_list.insert("""Key2""" , 2_0 )
assert skip_list.find("""Key2""" ) == 2_0
skip_list.insert("""Some Key""" , 1_0 )
skip_list.insert("""Key2""" , 8 )
skip_list.insert("""V""" , 1_3 )
assert skip_list.find("""Y""" ) is None
assert skip_list.find("""Key2""" ) == 8
assert skip_list.find("""Some Key""" ) == 1_0
assert skip_list.find("""V""" ) == 1_3
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Any = SkipList()
skip_list.delete("""Some key""" )
assert len(skip_list.head.forward ) == 0
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Tuple = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""V""" )
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""Key2""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[int] = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""V""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) == 1_4
assert skip_list.find("""Key1""" ) == 1_2
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""X""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) == 1_2
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""Key1""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4_2 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""X""" )
def traverse_keys(__UpperCamelCase : str ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(__UpperCamelCase )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def __lowerCAmelCase ( ):
'''simple docstring'''
def is_sorted(__UpperCamelCase : List[Any] ):
return all(next_item >= item for item, next_item in zip(__UpperCamelCase , lst[1:] ) )
snake_case_ : str = SkipList()
for i in range(1_0 ):
skip_list.insert(__UpperCamelCase , __UpperCamelCase )
assert is_sorted(list(__UpperCamelCase ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(__UpperCamelCase ) )
skip_list.insert(-1_2 , -1_2 )
skip_list.insert(7_7 , 7_7 )
assert is_sorted(list(__UpperCamelCase ) )
def __lowerCAmelCase ( ):
'''simple docstring'''
for _ in range(1_0_0 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = SkipList()
skip_list.insert(2 , """2""" )
skip_list.insert(4 , """4""" )
skip_list.insert(6 , """4""" )
skip_list.insert(4 , """5""" )
skip_list.insert(8 , """4""" )
skip_list.insert(9 , """4""" )
skip_list.delete(4 )
print(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 58 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCamelCase_ = 16
UpperCamelCase_ = 32
def lowerCamelCase_ ( _a : Accelerator , _a : int = 16 , _a : str = "bert-base-cased" ):
'''simple docstring'''
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
UpperCAmelCase_ : Optional[int] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(_a : str ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_ : Optional[int] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCAmelCase_ : Optional[Any] = datasets.map(
UpperCAmelCase__ , batched=UpperCAmelCase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=UpperCAmelCase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase_ : Tuple = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(_a : List[str] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCAmelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(UpperCAmelCase__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
UpperCAmelCase_ : Optional[int] = DataLoader(
tokenized_datasets["""train"""] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ )
UpperCAmelCase_ : str = DataLoader(
tokenized_datasets["""validation"""] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ )
return train_dataloader, eval_dataloader
def lowerCamelCase_ ( _a : List[Any] , _a : str , _a : str , _a : Dict ):
'''simple docstring'''
model.eval()
UpperCAmelCase_ : List[str] = 0
for step, batch in enumerate(UpperCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(**UpperCAmelCase__ )
UpperCAmelCase_ : int = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
UpperCAmelCase_ : List[Any] = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(UpperCAmelCase__ ) - 1:
UpperCAmelCase_ : Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCAmelCase_ : List[str] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=UpperCAmelCase__ , references=UpperCAmelCase__ , )
UpperCAmelCase_ : List[str] = metric.compute()
return eval_metric["accuracy"]
def lowerCamelCase_ ( _a : Optional[Any] , _a : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Any = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_ : List[Any] = config["""lr"""]
UpperCAmelCase_ : int = int(config["""num_epochs"""] )
UpperCAmelCase_ : Dict = int(config["""seed"""] )
UpperCAmelCase_ : Optional[Any] = int(config["""batch_size"""] )
UpperCAmelCase_ : Tuple = args.model_name_or_path
set_seed(UpperCAmelCase__ )
UpperCAmelCase_ : Tuple = get_dataloaders(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase_ : int = AutoModelForSequenceClassification.from_pretrained(UpperCAmelCase__ , return_dict=UpperCAmelCase__ )
# Instantiate optimizer
UpperCAmelCase_ : Optional[int] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCAmelCase_ : str = optimizer_cls(params=model.parameters() , lr=UpperCAmelCase__ )
if accelerator.state.deepspeed_plugin is not None:
UpperCAmelCase_ : Tuple = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : List[str] = (len(UpperCAmelCase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCAmelCase_ : Tuple = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase__ , num_warmup_steps=0 , num_training_steps=UpperCAmelCase__ , )
else:
UpperCAmelCase_ : str = DummyScheduler(UpperCAmelCase__ , total_num_steps=UpperCAmelCase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_ : Optional[int] = accelerator.prepare(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase_ : Any = 0
# We also need to keep track of the stating epoch so files are named properly
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""" )
UpperCAmelCase_ : Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
UpperCAmelCase_ : List[str] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
UpperCAmelCase_ : Optional[int] = args.resume_from_checkpoint.split("""epoch_""" )[1]
UpperCAmelCase_ : str = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
UpperCAmelCase_ : Union[str, Any] = int(UpperCAmelCase__ ) + 1
UpperCAmelCase_ : Optional[Any] = evaluation_loop(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
accelerator.print("""resumed checkpoint performance:""" , UpperCAmelCase__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , F'''state_{starting_epoch-1}.json''' ) , """r""" ) as f:
UpperCAmelCase_ : Any = json.load(UpperCAmelCase__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
UpperCAmelCase_ : Optional[int] = {}
for epoch in range(UpperCAmelCase__ , UpperCAmelCase__ ):
model.train()
for step, batch in enumerate(UpperCAmelCase__ ):
UpperCAmelCase_ : Union[str, Any] = model(**UpperCAmelCase__ )
UpperCAmelCase_ : Dict = outputs.loss
UpperCAmelCase_ : List[str] = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
UpperCAmelCase_ : List[str] = F'''epoch_{epoch}'''
UpperCAmelCase_ : Dict = os.path.join(args.output_dir , UpperCAmelCase__ )
accelerator.save_state(UpperCAmelCase__ )
UpperCAmelCase_ : Dict = evaluation_loop(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ : Optional[int] = accuracy
UpperCAmelCase_ : List[str] = lr_scheduler.get_lr()[0]
UpperCAmelCase_ : List[str] = optimizer.param_groups[0]["""lr"""]
UpperCAmelCase_ : List[str] = epoch
UpperCAmelCase_ : List[str] = overall_step
accelerator.print(F'''epoch {epoch}:''' , UpperCAmelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'''state_{epoch}.json''' ) , """w""" ) as f:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=UpperCAmelCase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=UpperCAmelCase__ , )
parser.add_argument(
"""--output_dir""" , type=UpperCAmelCase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=UpperCAmelCase__ , default=2 , help="""Number of train epochs.""" , )
UpperCAmelCase_ : Dict = parser.parse_args()
UpperCAmelCase_ : Tuple = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(UpperCAmelCase__ , UpperCAmelCase__ )
if __name__ == "__main__":
main()
| 708 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class _snake_case :
'''simple docstring'''
A__ : Any = BlenderbotConfig
A__ : List[str] = {}
A__ : Tuple = "gelu"
def __init__( self: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any]=13 ,lowerCamelCase_: List[str]=7 ,lowerCamelCase_: List[Any]=True ,lowerCamelCase_: Tuple=False ,lowerCamelCase_: List[Any]=99 ,lowerCamelCase_: Dict=32 ,lowerCamelCase_: str=2 ,lowerCamelCase_: List[Any]=4 ,lowerCamelCase_: List[Any]=37 ,lowerCamelCase_: str=0.1 ,lowerCamelCase_: List[Any]=0.1 ,lowerCamelCase_: Optional[int]=20 ,lowerCamelCase_: Tuple=2 ,lowerCamelCase_: str=1 ,lowerCamelCase_: Any=0 ,) -> List[str]:
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : List[str] = batch_size
UpperCAmelCase_ : Dict = seq_length
UpperCAmelCase_ : Dict = is_training
UpperCAmelCase_ : int = use_labels
UpperCAmelCase_ : List[str] = vocab_size
UpperCAmelCase_ : List[str] = hidden_size
UpperCAmelCase_ : Any = num_hidden_layers
UpperCAmelCase_ : int = num_attention_heads
UpperCAmelCase_ : str = intermediate_size
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : Tuple = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[Any] = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = eos_token_id
UpperCAmelCase_ : Union[str, Any] = pad_token_id
UpperCAmelCase_ : str = bos_token_id
def A__ ( self: Optional[Any] ) -> Tuple:
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
UpperCAmelCase_ : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
UpperCAmelCase_ : Dict = tf.concat([input_ids, eos_tensor] ,axis=1 )
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : Optional[int] = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
UpperCAmelCase_ : int = prepare_blenderbot_inputs_dict(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
return config, inputs_dict
def A__ ( self: Dict ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> int:
UpperCAmelCase_ : List[str] = TFBlenderbotModel(config=lowerCamelCase_ ).get_decoder()
UpperCAmelCase_ : List[str] = inputs_dict["""input_ids"""]
UpperCAmelCase_ : Dict = input_ids[:1, :]
UpperCAmelCase_ : Dict = inputs_dict["""attention_mask"""][:1, :]
UpperCAmelCase_ : str = inputs_dict["""head_mask"""]
UpperCAmelCase_ : str = 1
# first forward pass
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,head_mask=lowerCamelCase_ ,use_cache=lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase_ : Dict = ids_tensor((self.batch_size, 3) ,config.vocab_size )
UpperCAmelCase_ : Any = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta )
# append to next input_ids and
UpperCAmelCase_ : List[Any] = tf.concat([input_ids, next_tokens] ,axis=-1 )
UpperCAmelCase_ : Optional[Any] = tf.concat([attention_mask, next_attn_mask] ,axis=-1 )
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ )[0]
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,past_key_values=lowerCamelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] )
# select random slice
UpperCAmelCase_ : List[Any] = int(ids_tensor((1,) ,output_from_past.shape[-1] ) )
UpperCAmelCase_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase_ : Optional[int] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCamelCase_ ,lowerCamelCase_ ,rtol=1e-3 )
def lowerCamelCase_ ( _a : Any , _a : Tuple , _a : Any , _a : Optional[int]=None , _a : int=None , _a : int=None , _a : int=None , _a : Dict=None , ):
'''simple docstring'''
if attention_mask is None:
UpperCAmelCase_ : Optional[Any] = tf.cast(tf.math.not_equal(_a , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase_ : List[str] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase_ : Dict = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase_ : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase_ : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Union[str, Any] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
A__ : Optional[int] = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
A__ : Optional[Any] = (
{
"conversational": TFBlenderbotForConditionalGeneration,
"feature-extraction": TFBlenderbotModel,
"summarization": TFBlenderbotForConditionalGeneration,
"text2text-generation": TFBlenderbotForConditionalGeneration,
"translation": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
A__ : int = True
A__ : Optional[int] = False
A__ : Union[str, Any] = False
def A__ ( self: str ) -> List[Any]:
UpperCAmelCase_ : str = TFBlenderbotModelTester(self )
UpperCAmelCase_ : List[str] = ConfigTester(self ,config_class=lowerCamelCase_ )
def A__ ( self: Union[str, Any] ) -> Any:
self.config_tester.run_common_tests()
def A__ ( self: str ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase_ )
@require_tokenizers
@require_tf
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
A__ : int = ["My friends are cool but they eat too many carbs."]
A__ : Optional[int] = "facebook/blenderbot-400M-distill"
@cached_property
def A__ ( self: Optional[Any] ) -> Optional[int]:
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def A__ ( self: Tuple ) -> List[str]:
UpperCAmelCase_ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def A__ ( self: Dict ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = self.tokenizer(self.src_text ,return_tensors="""tf""" )
UpperCAmelCase_ : Tuple = self.model.generate(
model_inputs.input_ids ,)
UpperCAmelCase_ : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=lowerCamelCase_ )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 322 | 0 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
lowerCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase__ )
class _a ( UpperCamelCase__ ):
def __init__( self: Optional[Any] , *UpperCamelCase_: Tuple , **UpperCamelCase_: Any ) -> int:
"""simple docstring"""
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
requires_backends(self , '''decord''' )
self.check_model_type(UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: int=None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: List[Any]=None ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = {}
if frame_sampling_rate is not None:
lowercase__ = frame_sampling_rate
if num_frames is not None:
lowercase__ = num_frames
lowercase__ = {}
if top_k is not None:
lowercase__ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self: List[Any] , UpperCamelCase_: Union[str, List[str]] , **UpperCamelCase_: List[Any] ) -> Dict:
"""simple docstring"""
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Union[str, Any]=1 ) -> int:
"""simple docstring"""
if num_frames is None:
lowercase__ = self.model.config.num_frames
if video.startswith('''http://''' ) or video.startswith('''https://''' ):
lowercase__ = BytesIO(requests.get(UpperCamelCase_ ).content )
lowercase__ = VideoReader(UpperCamelCase_ )
videoreader.seek(0 )
lowercase__ = 0
lowercase__ = num_frames * frame_sampling_rate - 1
lowercase__ = np.linspace(UpperCamelCase_ , UpperCamelCase_ , num=UpperCamelCase_ , dtype=np.intaa )
lowercase__ = videoreader.get_batch(UpperCamelCase_ ).asnumpy()
lowercase__ = list(UpperCamelCase_ )
lowercase__ = self.image_processor(UpperCamelCase_ , return_tensors=self.framework )
return model_inputs
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: List[Any] ) -> Any:
"""simple docstring"""
lowercase__ = self.model(**UpperCamelCase_ )
return model_outputs
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: List[str]=5 ) -> str:
"""simple docstring"""
if top_k > self.model.config.num_labels:
lowercase__ = self.model.config.num_labels
if self.framework == "pt":
lowercase__ = model_outputs.logits.softmax(-1 )[0]
lowercase__ , lowercase__ = probs.topk(UpperCamelCase_ )
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
lowercase__ = scores.tolist()
lowercase__ = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
| 43 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
a = False
a = False
def UpperCAmelCase_ ( UpperCAmelCase__ ):
return TrainCommand(UpperCAmelCase__ )
class UpperCamelCase__ ( __magic_name__ ):
@staticmethod
def UpperCAmelCase__ ( UpperCamelCase__ : ArgumentParser ):
'''simple docstring'''
lowercase_ = parser.add_parser("""train""" , help="""CLI tool to train a model on a task.""" )
train_parser.add_argument(
"""--train_data""" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="""path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.""" , )
train_parser.add_argument(
"""--column_label""" , type=UpperCamelCase__ , default=0 , help="""Column of the dataset csv file with example labels.""" )
train_parser.add_argument(
"""--column_text""" , type=UpperCamelCase__ , default=1 , help="""Column of the dataset csv file with example texts.""" )
train_parser.add_argument(
"""--column_id""" , type=UpperCamelCase__ , default=2 , help="""Column of the dataset csv file with example ids.""" )
train_parser.add_argument(
"""--skip_first_row""" , action="""store_true""" , help="""Skip the first row of the csv file (headers).""" )
train_parser.add_argument("""--validation_data""" , type=UpperCamelCase__ , default="""""" , help="""path to validation dataset.""" )
train_parser.add_argument(
"""--validation_split""" , type=UpperCamelCase__ , default=0.1 , help="""if validation dataset is not provided, fraction of train dataset to use as validation dataset.""" , )
train_parser.add_argument("""--output""" , type=UpperCamelCase__ , default="""./""" , help="""path to saved the trained model.""" )
train_parser.add_argument(
"""--task""" , type=UpperCamelCase__ , default="""text_classification""" , help="""Task to train the model on.""" )
train_parser.add_argument(
"""--model""" , type=UpperCamelCase__ , default="""bert-base-uncased""" , help="""Model's name or path to stored model.""" )
train_parser.add_argument("""--train_batch_size""" , type=UpperCamelCase__ , default=32 , help="""Batch size for training.""" )
train_parser.add_argument("""--valid_batch_size""" , type=UpperCamelCase__ , default=64 , help="""Batch size for validation.""" )
train_parser.add_argument("""--learning_rate""" , type=UpperCamelCase__ , default=3e-5 , help="""Learning rate.""" )
train_parser.add_argument("""--adam_epsilon""" , type=UpperCamelCase__ , default=1e-08 , help="""Epsilon for Adam optimizer.""" )
train_parser.set_defaults(func=UpperCamelCase__ )
def __init__( self : Union[str, Any] , UpperCamelCase__ : Namespace ):
'''simple docstring'''
lowercase_ = logging.get_logger("""transformers-cli/training""" )
lowercase_ = """tf""" if is_tf_available() else """torch"""
os.makedirs(args.output , exist_ok=UpperCamelCase__ )
lowercase_ = args.output
lowercase_ = args.column_label
lowercase_ = args.column_text
lowercase_ = args.column_id
self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' )
if args.task == "text_classification":
lowercase_ = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F'''Loading dataset from {args.train_data}''' )
lowercase_ = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
lowercase_ = None
if args.validation_data:
self.logger.info(F'''Loading validation dataset from {args.validation_data}''' )
lowercase_ = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
lowercase_ = args.validation_split
lowercase_ = args.train_batch_size
lowercase_ = args.valid_batch_size
lowercase_ = args.learning_rate
lowercase_ = args.adam_epsilon
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
raise NotImplementedError
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 412 | 0 |
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> bool:
"""simple docstring"""
if number < 0:
raise ValueError('''number must not be negative''' )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 375 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCamelCase :
"""simple docstring"""
def __init__( self : Any , snake_case : Optional[Any] , snake_case : Tuple=13 , snake_case : List[Any]=32 , snake_case : Optional[Any]=2 , snake_case : Dict=3 , snake_case : Union[str, Any]=16 , snake_case : Optional[int]=[32, 64, 128] , snake_case : int=[1, 2, 1] , snake_case : Any=[2, 2, 4] , snake_case : Optional[int]=2 , snake_case : List[str]=2.0 , snake_case : Any=True , snake_case : Optional[Any]=0.0 , snake_case : str=0.0 , snake_case : List[Any]=0.1 , snake_case : List[str]="gelu" , snake_case : Any=False , snake_case : Optional[int]=True , snake_case : List[str]=0.02 , snake_case : List[Any]=1E-5 , snake_case : str=True , snake_case : Any=None , snake_case : int=True , snake_case : Optional[Any]=10 , snake_case : List[str]=8 , snake_case : List[str]=["stage1", "stage2"] , snake_case : Optional[Any]=[1, 2] , ):
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = image_size
__UpperCamelCase = patch_size
__UpperCamelCase = num_channels
__UpperCamelCase = embed_dim
__UpperCamelCase = hidden_sizes
__UpperCamelCase = depths
__UpperCamelCase = num_heads
__UpperCamelCase = window_size
__UpperCamelCase = mlp_ratio
__UpperCamelCase = qkv_bias
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = drop_path_rate
__UpperCamelCase = hidden_act
__UpperCamelCase = use_absolute_embeddings
__UpperCamelCase = patch_norm
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = initializer_range
__UpperCamelCase = is_training
__UpperCamelCase = scope
__UpperCamelCase = use_labels
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = encoder_stride
__UpperCamelCase = out_features
__UpperCamelCase = out_indices
def snake_case ( self : Union[str, Any] ):
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = self.get_config()
return config, pixel_values, labels
def snake_case ( self : List[str] ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def snake_case ( self : int , snake_case : Optional[int] , snake_case : List[str] , snake_case : Optional[int] ):
__UpperCamelCase = FocalNetModel(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCamelCase = model(snake_case )
__UpperCamelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__UpperCamelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def snake_case ( self : Optional[Any] , snake_case : Any , snake_case : Dict , snake_case : int ):
__UpperCamelCase = FocalNetBackbone(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCamelCase = model(snake_case )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__UpperCamelCase = None
__UpperCamelCase = FocalNetBackbone(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCamelCase = model(snake_case )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def snake_case ( self : Optional[int] , snake_case : Union[str, Any] , snake_case : int , snake_case : Any ):
__UpperCamelCase = FocalNetForMaskedImageModeling(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCamelCase = model(snake_case )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__UpperCamelCase = 1
__UpperCamelCase = FocalNetForMaskedImageModeling(snake_case )
model.to(snake_case )
model.eval()
__UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCamelCase = model(snake_case )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def snake_case ( self : str , snake_case : Tuple , snake_case : List[str] , snake_case : Union[str, Any] ):
__UpperCamelCase = self.type_sequence_label_size
__UpperCamelCase = FocalNetForImageClassification(snake_case )
model.to(snake_case )
model.eval()
__UpperCamelCase = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCamelCase = 1
__UpperCamelCase = FocalNetForImageClassification(snake_case )
model.to(snake_case )
model.eval()
__UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCamelCase = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def snake_case ( self : str ):
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCamelCase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ : int = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : Optional[Any] = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ : List[str] = False
lowerCAmelCase__ : Any = False
lowerCAmelCase__ : Tuple = False
lowerCAmelCase__ : List[Any] = False
lowerCAmelCase__ : Optional[Any] = False
def snake_case ( self : Any ):
__UpperCamelCase = FocalNetModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=snake_case , embed_dim=37 , has_text_modality=snake_case )
def snake_case ( self : Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self : List[Any] ):
return
def snake_case ( self : List[Any] ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def snake_case ( self : List[Any] ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*snake_case )
def snake_case ( self : Any ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case )
def snake_case ( self : Any ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
@unittest.skip(reason='''FocalNet does not use inputs_embeds''' )
def snake_case ( self : Optional[Any] ):
pass
@unittest.skip(reason='''FocalNet does not use feedforward chunking''' )
def snake_case ( self : int ):
pass
def snake_case ( self : Tuple ):
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__UpperCamelCase = model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) )
def snake_case ( self : Dict ):
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__UpperCamelCase = model_class(snake_case )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , snake_case )
def snake_case ( self : Optional[int] , snake_case : Union[str, Any] , snake_case : int , snake_case : List[str] , snake_case : Optional[int] ):
__UpperCamelCase = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
__UpperCamelCase = model(**self._prepare_for_class(snake_case , snake_case ) )
__UpperCamelCase = outputs.hidden_states
__UpperCamelCase = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(snake_case ) , snake_case )
# FocalNet has a different seq_length
__UpperCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__UpperCamelCase = outputs.reshaped_hidden_states
self.assertEqual(len(snake_case ) , snake_case )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = reshaped_hidden_states[0].shape
__UpperCamelCase = (
reshaped_hidden_states[0].view(snake_case , snake_case , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def snake_case ( self : Union[str, Any] ):
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__UpperCamelCase = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCamelCase = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case )
def snake_case ( self : Optional[Any] ):
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = 3
__UpperCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__UpperCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCamelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__UpperCamelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
__UpperCamelCase = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCamelCase = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) )
@slow
def snake_case ( self : Union[str, Any] ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = FocalNetModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def snake_case ( self : Optional[int] ):
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = _config_zero_init(snake_case )
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(config=snake_case )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : str ):
# TODO update organization
return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''' ) if is_vision_available() else None
@slow
def snake_case ( self : List[str] ):
__UpperCamelCase = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''' ).to(snake_case )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__UpperCamelCase = image_processor(images=snake_case , return_tensors='''pt''' ).to(snake_case )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**snake_case )
# verify the logits
__UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , snake_case )
__UpperCamelCase = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class _lowerCamelCase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ : str = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase__ : Dict = FocalNetConfig
lowerCAmelCase__ : List[str] = False
def snake_case ( self : Dict ):
__UpperCamelCase = FocalNetModelTester(self )
| 375 | 1 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
snake_case__ : str = 'Usage of script: script_name <size_of_canvas:int>'
snake_case__ : Tuple = [0] * 1_0_0 + [1] * 1_0
random.shuffle(choice)
def lowerCamelCase__ ( _lowerCamelCase ) ->list[list[bool]]:
_UpperCAmelCase =[[False for i in range(_lowerCamelCase )] for j in range(_lowerCamelCase )]
return canvas
def lowerCamelCase__ ( _lowerCamelCase ) ->None:
for i, row in enumerate(_lowerCamelCase ):
for j, _ in enumerate(_lowerCamelCase ):
_UpperCAmelCase =bool(random.getrandbits(1 ) )
def lowerCamelCase__ ( _lowerCamelCase ) ->list[list[bool]]:
_UpperCAmelCase =np.array(_lowerCamelCase )
_UpperCAmelCase =np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(_lowerCamelCase ):
for c, pt in enumerate(_lowerCamelCase ):
_UpperCAmelCase =__judge_point(
_lowerCamelCase , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
_UpperCAmelCase =next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
_UpperCAmelCase =current_canvas.tolist()
return return_canvas
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ) ->bool:
_UpperCAmelCase =0
_UpperCAmelCase =0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
_UpperCAmelCase =pt
if pt:
if alive < 2:
_UpperCAmelCase =False
elif alive == 2 or alive == 3:
_UpperCAmelCase =True
elif alive > 3:
_UpperCAmelCase =False
else:
if alive == 3:
_UpperCAmelCase =True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
snake_case__ : Any = int(sys.argv[1])
# main working structure of this module.
snake_case__ : Optional[Any] = create_canvas(canvas_size)
seed(c)
snake_case__, snake_case__ : Dict = plt.subplots()
fig.show()
snake_case__ : Any = ListedColormap(['w', 'k'])
try:
while True:
snake_case__ : int = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 408 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case__ : Dict = logging.get_logger(__name__)
snake_case__ : Any = {
'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class _a ( A__ ):
"""simple docstring"""
snake_case ="""mobilenet_v2"""
def __init__( self , _snake_case=3 , _snake_case=224 , _snake_case=1.0 , _snake_case=8 , _snake_case=8 , _snake_case=6 , _snake_case=32 , _snake_case=True , _snake_case=True , _snake_case="relu6" , _snake_case=True , _snake_case=0.8 , _snake_case=0.02 , _snake_case=0.001 , _snake_case=255 , **_snake_case , ):
super().__init__(**_snake_case )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
_UpperCAmelCase =num_channels
_UpperCAmelCase =image_size
_UpperCAmelCase =depth_multiplier
_UpperCAmelCase =depth_divisible_by
_UpperCAmelCase =min_depth
_UpperCAmelCase =expand_ratio
_UpperCAmelCase =output_stride
_UpperCAmelCase =first_layer_is_expansion
_UpperCAmelCase =finegrained_output
_UpperCAmelCase =hidden_act
_UpperCAmelCase =tf_padding
_UpperCAmelCase =classifier_dropout_prob
_UpperCAmelCase =initializer_range
_UpperCAmelCase =layer_norm_eps
_UpperCAmelCase =semantic_loss_ignore_index
class _a ( A__ ):
"""simple docstring"""
snake_case =version.parse("""1.11""" )
@property
def SCREAMING_SNAKE_CASE ( self ):
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def SCREAMING_SNAKE_CASE ( self ):
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def SCREAMING_SNAKE_CASE ( self ):
return 1E-4
| 408 | 1 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def a__ ( lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] ):
'''simple docstring'''
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[int] = np.full((len(lowerCAmelCase ), sequence_length, 2) , lowerCAmelCase )
else:
UpperCAmelCase__ : Optional[Any] = np.full((len(lowerCAmelCase ), sequence_length) , lowerCAmelCase )
for i, tensor in enumerate(lowerCAmelCase ):
if padding_side == "right":
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Dict = tensor[:sequence_length]
else:
UpperCAmelCase__ : Tuple = tensor[:sequence_length]
else:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[Any] = tensor[:sequence_length]
else:
UpperCAmelCase__ : int = tensor[:sequence_length]
return out_tensor.tolist()
def a__ ( lowerCAmelCase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = ord(lowerCAmelCase )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
UpperCAmelCase__ : Optional[Any] = unicodedata.category(lowerCAmelCase )
if cat.startswith("P" ):
return True
return False
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = True
_A = None
_A = None
_A = -100
_A = 'pt'
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
import torch
UpperCAmelCase__ : Optional[Any] = "label" if "label" in features[0].keys() else "labels"
UpperCAmelCase__ : Dict = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
UpperCAmelCase__ : str = self.tokenizer.pad(
__UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
if labels is None:
return batch
UpperCAmelCase__ : Optional[Any] = torch.tensor(batch["entity_ids"] ).shape[1]
UpperCAmelCase__ : int = self.tokenizer.padding_side
if padding_side == "right":
UpperCAmelCase__ : int = [
list(__UpperCamelCase ) + [self.label_pad_token_id] * (sequence_length - len(__UpperCamelCase )) for label in labels
]
else:
UpperCAmelCase__ : List[Any] = [
[self.label_pad_token_id] * (sequence_length - len(__UpperCamelCase )) + list(__UpperCamelCase ) for label in labels
]
UpperCAmelCase__ : Optional[Any] = [feature["ner_tags"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , -1 , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = [feature["original_entity_spans"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , (-1, -1) , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = {k: torch.tensor(__UpperCamelCase , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 708 |
"""simple docstring"""
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if a < 0:
raise ValueError("Input value must be a positive integer" )
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
raise TypeError("Input value must be a 'int' type" )
return bin(lowerCAmelCase ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = None
_SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Dict = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
_SCREAMING_SNAKE_CASE : Dict = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
'tokenizer_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
},
}
_SCREAMING_SNAKE_CASE : Optional[Any] = {
'google/rembert': 256,
}
_SCREAMING_SNAKE_CASE : Dict = '▁'
class A ( lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase : Tuple = VOCAB_FILES_NAMES
lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = RemBertTokenizer
def __init__( self : Union[str, Any] , _UpperCamelCase : List[Any]=None , _UpperCamelCase : Any=None , _UpperCamelCase : List[Any]=True , _UpperCamelCase : Any=True , _UpperCamelCase : Dict=False , _UpperCamelCase : Tuple="[CLS]" , _UpperCamelCase : List[str]="[SEP]" , _UpperCamelCase : Optional[int]="<unk>" , _UpperCamelCase : int="[SEP]" , _UpperCamelCase : List[Any]="<pad>" , _UpperCamelCase : List[str]="[CLS]" , _UpperCamelCase : str="[MASK]" , **_UpperCamelCase : str , ):
# Mask token behave like a normal word, i.e. include the space before it
_lowercase: Any = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase) if isinstance(_UpperCamelCase , _UpperCamelCase) else mask_token
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , remove_space=_UpperCamelCase , keep_accents=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , **_UpperCamelCase , )
_lowercase: Optional[Any] = do_lower_case
_lowercase: int = remove_space
_lowercase: List[str] = keep_accents
_lowercase: Dict = vocab_file
_lowercase: Union[str, Any] = False if not self.vocab_file else True
def UpperCAmelCase__ ( self : Optional[int] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None):
_lowercase: Dict = [self.sep_token_id]
_lowercase: List[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase__ ( self : Dict , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model.")
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_UpperCamelCase)) + [1] + ([0] * len(_UpperCamelCase)) + [1]
return [1] + ([0] * len(_UpperCamelCase)) + [1]
def UpperCAmelCase__ ( self : Optional[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None):
_lowercase: int = [self.sep_token_id]
_lowercase: Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def UpperCAmelCase__ ( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None):
if not os.path.isdir(_UpperCamelCase):
logger.error("Vocabulary path ({}) should be a directory".format(_UpperCamelCase))
return
_lowercase: Optional[Any] = os.path.join(
_UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(_UpperCamelCase):
copyfile(self.vocab_file , _UpperCamelCase)
return (out_vocab_file,)
| 226 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
_SCREAMING_SNAKE_CASE : List[Any] = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
_SCREAMING_SNAKE_CASE : List[Any] = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
_SCREAMING_SNAKE_CASE : List[Any] = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_SCREAMING_SNAKE_CASE : List[Any] = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Optional[int]):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Value("string" , id="sequence"),
}) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
def UpperCAmelCase__ ( self : Union[str, Any] , _UpperCamelCase : Union[str, Any]):
import nltk
nltk.download("wordnet")
if NLTK_VERSION >= version.Version("3.6.5"):
nltk.download("punkt")
if NLTK_VERSION >= version.Version("3.6.6"):
nltk.download("omw-1.4")
def UpperCAmelCase__ ( self : int , _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : int=0.9 , _UpperCamelCase : List[str]=3 , _UpperCamelCase : Dict=0.5):
if NLTK_VERSION >= version.Version("3.6.5"):
_lowercase: List[str] = [
meteor_score.single_meteor_score(
word_tokenize(_UpperCamelCase) , word_tokenize(_UpperCamelCase) , alpha=_UpperCamelCase , beta=_UpperCamelCase , gamma=_UpperCamelCase)
for ref, pred in zip(_UpperCamelCase , _UpperCamelCase)
]
else:
_lowercase: Optional[int] = [
meteor_score.single_meteor_score(_UpperCamelCase , _UpperCamelCase , alpha=_UpperCamelCase , beta=_UpperCamelCase , gamma=_UpperCamelCase)
for ref, pred in zip(_UpperCamelCase , _UpperCamelCase)
]
return {"meteor": np.mean(_UpperCamelCase)}
| 226 | 1 |
'''simple docstring'''
import math
def __UpperCAmelCase ( a_: float, a_: float ):
return math.pow(a_, 2 ) - a
def __UpperCAmelCase ( a_: float ):
return 2 * x
def __UpperCAmelCase ( a_: float ):
_UpperCAmelCase : Union[str, Any] = 2.0
while start <= a:
_UpperCAmelCase : Union[str, Any] = math.pow(a_, 2 )
return start
def __UpperCAmelCase ( a_: float, a_: int = 9_999, a_: float = 0.00_00_00_00_00_00_01 ):
if a < 0:
raise ValueError("math domain error" )
_UpperCAmelCase : Optional[Any] = get_initial_point(a_ )
for _ in range(a_ ):
_UpperCAmelCase : Optional[Any] = value
_UpperCAmelCase : Dict = value - fx(a_, a_ ) / fx_derivative(a_ )
if abs(prev_value - value ) < tolerance:
return value
return value
if __name__ == "__main__":
from doctest import testmod
testmod() | 257 | '''simple docstring'''
from __future__ import annotations
import bisect
def __UpperCAmelCase ( a_: list[int], a_: int, a_: int = 0, a_: int = -1 ):
if hi < 0:
_UpperCAmelCase : int = len(a_ )
while lo < hi:
_UpperCAmelCase : Tuple = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
_UpperCAmelCase : str = mid + 1
else:
_UpperCAmelCase : int = mid
return lo
def __UpperCAmelCase ( a_: list[int], a_: int, a_: int = 0, a_: int = -1 ):
if hi < 0:
_UpperCAmelCase : str = len(a_ )
while lo < hi:
_UpperCAmelCase : Optional[int] = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
_UpperCAmelCase : Tuple = mid + 1
else:
_UpperCAmelCase : Union[str, Any] = mid
return lo
def __UpperCAmelCase ( a_: list[int], a_: int, a_: int = 0, a_: int = -1 ):
sorted_collection.insert(bisect_left(a_, a_, a_, a_ ), a_ )
def __UpperCAmelCase ( a_: list[int], a_: int, a_: int = 0, a_: int = -1 ):
sorted_collection.insert(bisect_right(a_, a_, a_, a_ ), a_ )
def __UpperCAmelCase ( a_: list[int], a_: int ):
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : Tuple = len(a_ ) - 1
while left <= right:
_UpperCAmelCase : List[str] = left + (right - left) // 2
_UpperCAmelCase : List[str] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
_UpperCAmelCase : Optional[int] = midpoint - 1
else:
_UpperCAmelCase : Union[str, Any] = midpoint + 1
return None
def __UpperCAmelCase ( a_: list[int], a_: int ):
_UpperCAmelCase : int = bisect.bisect_left(a_, a_ )
if index != len(a_ ) and sorted_collection[index] == item:
return index
return None
def __UpperCAmelCase ( a_: list[int], a_: int, a_: int, a_: int ):
if right < left:
return None
_UpperCAmelCase : Union[str, Any] = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(a_, a_, a_, midpoint - 1 )
else:
return binary_search_by_recursion(a_, a_, midpoint + 1, a_ )
if __name__ == "__main__":
__a = input('Enter numbers separated by comma:\n').strip()
__a = sorted(int(item) for item in user_input.split(','))
__a = int(input('Enter a single number to be found in the list:\n'))
__a = binary_search(collection, target)
if result is None:
print(f'{target} was not found in {collection}.')
else:
print(f'{target} was found at position {result} in {collection}.') | 257 | 1 |
'''simple docstring'''
import math
import random
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False ) -> float:
'''simple docstring'''
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
lowercase__ = 0.02
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> float:
'''simple docstring'''
snake_case : Union[str, Any] = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(lowerCamelCase__ ):
# Forward propagation
snake_case : Union[str, Any] = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
snake_case : List[str] = (expected / 100) - layer_a
# Error delta
snake_case : Optional[Any] = layer_1_error * sigmoid_function(lowerCamelCase__ , lowerCamelCase__ )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ = int(input("Expected value: "))
lowercase__ = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations))
| 638 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> np.ndarray:
__lowerCamelCase : Optional[Any] = cva.getAffineTransform(lowerCamelCase__ , lowerCamelCase__ )
return cva.warpAffine(lowerCamelCase__ , lowerCamelCase__ , (rows, cols) )
if __name__ == "__main__":
# read original image
a =cva.imread(
str(Path(__file__).resolve().parent.parent / """image_data""" / """lena.jpg""")
)
# turn image in gray scale value
a =cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
a , a =gray_img.shape
# set different points to rotate image
a =np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
a =np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
a =np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
a =np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
a =[
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
a =plt.figure(1)
a =["""Original""", """Rotation 1""", """Rotation 2""", """Rotation 3"""]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, """gray""")
plt.title(titles[i])
plt.axis("""off""")
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 652 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
A = logging.get_logger(__name__)
A = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
A = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def UpperCAmelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any]):
for attribute in key.split('.'):
lowerCamelCase : int = getattr(UpperCAmelCase__ , UpperCAmelCase__)
if weight_type is not None:
lowerCamelCase : Any = getattr(UpperCAmelCase__ , UpperCAmelCase__).shape
else:
lowerCamelCase : int = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''')
if weight_type == "weight":
lowerCamelCase : str = value
elif weight_type == "weight_g":
lowerCamelCase : List[Any] = value
elif weight_type == "weight_v":
lowerCamelCase : Dict = value
elif weight_type == "bias":
lowerCamelCase : Tuple = value
elif weight_type == "running_mean":
lowerCamelCase : List[str] = value
elif weight_type == "running_var":
lowerCamelCase : Dict = value
elif weight_type == "num_batches_tracked":
lowerCamelCase : Union[str, Any] = value
elif weight_type == "inv_freq":
lowerCamelCase : Any = value
else:
lowerCamelCase : List[Any] = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''')
def UpperCAmelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int]):
lowerCamelCase : str = []
lowerCamelCase : Optional[Any] = fairseq_model.state_dict()
lowerCamelCase : Optional[Any] = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
lowerCamelCase : Any = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , hf_model.config.feat_extract_norm == 'group' , )
lowerCamelCase : Tuple = True
else:
for key, mapped_key in MAPPING.items():
lowerCamelCase : List[str] = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
lowerCamelCase : List[Any] = True
if "*" in mapped_key:
lowerCamelCase : Optional[Any] = name.split(UpperCAmelCase__)[0].split('.')[-2]
lowerCamelCase : Union[str, Any] = mapped_key.replace('*' , UpperCAmelCase__)
if "pos_bias_u" in name:
lowerCamelCase : Optional[int] = None
elif "pos_bias_v" in name:
lowerCamelCase : List[str] = None
elif "weight_g" in name:
lowerCamelCase : str = 'weight_g'
elif "weight_v" in name:
lowerCamelCase : Union[str, Any] = 'weight_v'
elif "bias" in name:
lowerCamelCase : str = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCamelCase : List[str] = 'weight'
elif "running_mean" in name:
lowerCamelCase : List[str] = 'running_mean'
elif "inv_freq" in name:
lowerCamelCase : int = 'inv_freq'
elif "running_var" in name:
lowerCamelCase : int = 'running_var'
elif "num_batches_tracked" in name:
lowerCamelCase : Union[str, Any] = 'num_batches_tracked'
else:
lowerCamelCase : str = None
set_recursively(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
continue
if not is_used:
unused_weights.append(UpperCAmelCase__)
logger.warning(F'''Unused weights: {unused_weights}''')
def UpperCAmelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : str):
lowerCamelCase : Dict = full_name.split('conv_layers.')[-1]
lowerCamelCase : int = name.split('.')
lowerCamelCase : Any = int(items[0])
lowerCamelCase : Dict = int(items[1])
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''')
lowerCamelCase : Optional[Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''')
lowerCamelCase : List[str] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''')
lowerCamelCase : Union[str, Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''')
lowerCamelCase : Union[str, Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
else:
unused_weights.append(UpperCAmelCase__)
@torch.no_grad()
def UpperCAmelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Optional[int]=True):
if config_path is not None:
lowerCamelCase : Union[str, Any] = WavaVecaConformerConfig.from_pretrained(UpperCAmelCase__ , hidden_act='swish')
else:
lowerCamelCase : Tuple = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
lowerCamelCase : Optional[int] = 'rotary'
if is_finetuned:
if dict_path:
lowerCamelCase : Dict = Dictionary.load(UpperCAmelCase__)
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCamelCase : Tuple = target_dict.pad_index
lowerCamelCase : List[Any] = target_dict.bos_index
lowerCamelCase : str = target_dict.eos_index
lowerCamelCase : Dict = len(target_dict.symbols)
lowerCamelCase : Union[str, Any] = os.path.join(UpperCAmelCase__ , 'vocab.json')
if not os.path.isdir(UpperCAmelCase__):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(UpperCAmelCase__))
return
os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__)
lowerCamelCase : Optional[int] = target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCamelCase : Any = 0
lowerCamelCase : List[str] = 1
with open(UpperCAmelCase__ , 'w' , encoding='utf-8') as vocab_handle:
json.dump(UpperCAmelCase__ , UpperCAmelCase__)
lowerCamelCase : Optional[Any] = WavaVecaCTCTokenizer(
UpperCAmelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=UpperCAmelCase__ , )
lowerCamelCase : int = True if config.feat_extract_norm == 'layer' else False
lowerCamelCase : List[str] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , )
lowerCamelCase : int = WavaVecaProcessor(feature_extractor=UpperCAmelCase__ , tokenizer=UpperCAmelCase__)
processor.save_pretrained(UpperCAmelCase__)
lowerCamelCase : int = WavaVecaConformerForCTC(UpperCAmelCase__)
else:
lowerCamelCase : Union[str, Any] = WavaVecaConformerForPreTraining(UpperCAmelCase__)
if is_finetuned:
lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
else:
lowerCamelCase : Tuple = argparse.Namespace(task='audio_pretraining')
lowerCamelCase : Any = fairseq.tasks.setup_task(UpperCAmelCase__)
lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=UpperCAmelCase__)
lowerCamelCase : Optional[Any] = model[0].eval()
recursively_load_weights(UpperCAmelCase__ , UpperCAmelCase__ , not is_finetuned)
hf_wavavec.save_pretrained(UpperCAmelCase__)
if __name__ == "__main__":
A = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
A = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 449 |
'''simple docstring'''
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __snake_case ( a__ , unittest.TestCase):
_lowerCAmelCase = BertTokenizer
_lowerCAmelCase = BertTokenizerFast
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = filter_non_english
def UpperCAmelCase_ ( self ):
"""simple docstring"""
super().setUp()
lowerCamelCase : str = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
lowerCamelCase : Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def UpperCAmelCase_ ( self, A ):
"""simple docstring"""
lowerCamelCase : str = 'UNwant\u00E9d,running'
lowerCamelCase : List[str] = 'unwanted, running'
return input_text, output_text
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = self.tokenizer_class(self.vocab_file )
lowerCamelCase : List[str] = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(A, ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ), [9, 6, 7, 12, 10, 11] )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowerCamelCase : Dict = self.get_tokenizer()
lowerCamelCase : Optional[Any] = self.get_rust_tokenizer()
lowerCamelCase : Dict = 'UNwant\u00E9d,running'
lowerCamelCase : Tuple = tokenizer.tokenize(A )
lowerCamelCase : Optional[int] = rust_tokenizer.tokenize(A )
self.assertListEqual(A, A )
lowerCamelCase : Optional[Any] = tokenizer.encode(A, add_special_tokens=A )
lowerCamelCase : List[Any] = rust_tokenizer.encode(A, add_special_tokens=A )
self.assertListEqual(A, A )
lowerCamelCase : Optional[Any] = self.get_rust_tokenizer()
lowerCamelCase : Optional[Any] = tokenizer.encode(A )
lowerCamelCase : Tuple = rust_tokenizer.encode(A )
self.assertListEqual(A, A )
# With lower casing
lowerCamelCase : Union[str, Any] = self.get_tokenizer(do_lower_case=A )
lowerCamelCase : List[Any] = self.get_rust_tokenizer(do_lower_case=A )
lowerCamelCase : Optional[int] = 'UNwant\u00E9d,running'
lowerCamelCase : Optional[Any] = tokenizer.tokenize(A )
lowerCamelCase : str = rust_tokenizer.tokenize(A )
self.assertListEqual(A, A )
lowerCamelCase : Optional[Any] = tokenizer.encode(A, add_special_tokens=A )
lowerCamelCase : Union[str, Any] = rust_tokenizer.encode(A, add_special_tokens=A )
self.assertListEqual(A, A )
lowerCamelCase : Optional[Any] = self.get_rust_tokenizer()
lowerCamelCase : Any = tokenizer.encode(A )
lowerCamelCase : Dict = rust_tokenizer.encode(A )
self.assertListEqual(A, A )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ), ['ah', '\u535A', '\u63A8', 'zz'] )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : int = BasicTokenizer(do_lower_case=A )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ), ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ), ['hello'] )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = BasicTokenizer(do_lower_case=A, strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ), ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ), ['h\u00E9llo'] )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = BasicTokenizer(do_lower_case=A, strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ), ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ), ['hello'] )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : str = BasicTokenizer(do_lower_case=A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ), ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ), ['hello'] )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : str = BasicTokenizer(do_lower_case=A )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ), ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : int = BasicTokenizer(do_lower_case=A, strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ), ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = BasicTokenizer(do_lower_case=A, strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ), ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = BasicTokenizer(do_lower_case=A, never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ), ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = BasicTokenizer()
lowerCamelCase : Optional[Any] = 'a\n\'ll !!to?\'d of, can\'t.'
lowerCamelCase : int = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
self.assertListEqual(tokenizer.tokenize(A ), A )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
lowerCamelCase : Optional[int] = {}
for i, token in enumerate(A ):
lowerCamelCase : int = i
lowerCamelCase : List[Any] = WordpieceTokenizer(vocab=A, unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ), [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ), ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ), ['[UNK]', 'runn', '##ing'] )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = self.get_tokenizer()
lowerCamelCase : Optional[int] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A ) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(A ) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']] )
@slow
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = self.tokenizer_class.from_pretrained('bert-base-uncased' )
lowerCamelCase : Any = tokenizer.encode('sequence builders', add_special_tokens=A )
lowerCamelCase : Tuple = tokenizer.encode('multi-sequence build', add_special_tokens=A )
lowerCamelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(A )
lowerCamelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(A, A )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def UpperCAmelCase_ ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(A, **A )
lowerCamelCase : Optional[int] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
lowerCamelCase : List[str] = tokenizer_r.encode_plus(
A, return_attention_mask=A, return_token_type_ids=A, return_offsets_mapping=A, add_special_tokens=A, )
lowerCamelCase : int = tokenizer_r.do_lower_case if hasattr(A, 'do_lower_case' ) else False
lowerCamelCase : Union[str, Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results], tokens['offset_mapping'] )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Optional[int] = ['的', '人', '有']
lowerCamelCase : List[str] = ''.join(A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase : Dict = True
lowerCamelCase : Tuple = self.tokenizer_class.from_pretrained(A, **A )
lowerCamelCase : int = self.rust_tokenizer_class.from_pretrained(A, **A )
lowerCamelCase : Union[str, Any] = tokenizer_p.encode(A, add_special_tokens=A )
lowerCamelCase : Dict = tokenizer_r.encode(A, add_special_tokens=A )
lowerCamelCase : Optional[Any] = tokenizer_r.convert_ids_to_tokens(A )
lowerCamelCase : str = tokenizer_p.convert_ids_to_tokens(A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A, A )
self.assertListEqual(A, A )
lowerCamelCase : int = False
lowerCamelCase : int = self.rust_tokenizer_class.from_pretrained(A, **A )
lowerCamelCase : Tuple = self.tokenizer_class.from_pretrained(A, **A )
lowerCamelCase : int = tokenizer_r.encode(A, add_special_tokens=A )
lowerCamelCase : List[str] = tokenizer_p.encode(A, add_special_tokens=A )
lowerCamelCase : Optional[Any] = tokenizer_r.convert_ids_to_tokens(A )
lowerCamelCase : Tuple = tokenizer_p.convert_ids_to_tokens(A )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase : Tuple = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(A )
]
self.assertListEqual(A, A )
self.assertListEqual(A, A )
| 449 | 1 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
SCREAMING_SNAKE_CASE__ : Tuple = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def __lowercase ( snake_case ):
"""simple docstring"""
for pegasus_name, hf_name in PATTERNS:
__magic_name__ :Union[str, Any] = k.replace(snake_case, snake_case )
return k
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Dict = DEFAULTS.copy()
cfg_kwargs.update(snake_case )
__magic_name__ :Optional[Any] = PegasusConfig(**snake_case )
__magic_name__ :List[Any] = PegasusForConditionalGeneration(snake_case )
__magic_name__ :Union[str, Any] = torch_model.model.state_dict()
__magic_name__ :Any = {}
for k, v in tf_weights.items():
__magic_name__ :int = rename_state_dict_key(snake_case )
if new_k not in sd:
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
if "dense" in k or "proj" in new_k:
__magic_name__ :Tuple = v.T
__magic_name__ :Optional[int] = torch.tensor(snake_case, dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
# make sure embedding.padding_idx is respected
__magic_name__ :Tuple = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
__magic_name__ :int = mapping['''shared.weight''']
__magic_name__ :Optional[Any] = mapping['''shared.weight''']
__magic_name__ :int = {k: torch.zeros_like(snake_case ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
mapping.update(**snake_case )
__magic_name__ , __magic_name__ :Dict = torch_model.model.load_state_dict(snake_case, strict=snake_case )
__magic_name__ :Optional[Any] = [
k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
def __lowercase ( snake_case="./ckpt/aeslc/model.ckpt-32000" ):
"""simple docstring"""
__magic_name__ :Optional[int] = tf.train.list_variables(snake_case )
__magic_name__ :Optional[Any] = {}
__magic_name__ :Optional[int] = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(snake_case, desc='''converting tf checkpoint to dict''' ):
__magic_name__ :Optional[Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
__magic_name__ :Optional[int] = tf.train.load_variable(snake_case, snake_case )
__magic_name__ :List[Any] = array
return tf_weights
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[str] = Path(snake_case ).parent.name
__magic_name__ :Union[str, Any] = task_specific_params[f'''summarization_{dataset}''']['''max_position_embeddings''']
__magic_name__ :int = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''', model_max_length=snake_case )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(snake_case )
# convert model
__magic_name__ :Optional[int] = get_tf_weights_as_numpy(snake_case )
__magic_name__ :Optional[Any] = task_specific_params[f'''summarization_{dataset}''']
if dataset == "large":
__magic_name__ :Union[str, Any] = task_specific_params
__magic_name__ :Optional[int] = convert_pegasus(snake_case, snake_case )
torch_model.save_pretrained(snake_case )
__magic_name__ :List[str] = torch_model.state_dict()
sd.pop('''model.decoder.embed_positions.weight''' )
sd.pop('''model.encoder.embed_positions.weight''' )
torch.save(snake_case, Path(snake_case ) / '''pytorch_model.bin''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
if args.save_dir is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path(args.tf_ckpt_path).parent.name
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join("""pegasus""", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 0 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowerCamelCase_ ( lowerCamelCase ):
a__ = ''''''
a__ = '''hf-legacy''' # "hf://"" is reserved for hffs
def __init__( self , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(self , **__lowerCAmelCase )
__magic_name__ :List[Any] = repo_info
__magic_name__ :Dict = token
__magic_name__ :Optional[Any] = None
def A ( self ):
"""simple docstring"""
if self.dir_cache is None:
__magic_name__ :Any = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__magic_name__ :Optional[int] = {
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(__lowerCAmelCase ): {'''name''': str(__lowerCAmelCase ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def A ( self , __lowerCAmelCase , __lowerCAmelCase = "rb" , **__lowerCAmelCase , ):
"""simple docstring"""
if not isinstance(self.repo_info , __lowerCAmelCase ):
raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
__magic_name__ :Union[str, Any] = hf_hub_url(self.repo_info.id , __lowerCAmelCase , revision=self.repo_info.sha )
return fsspec.open(
__lowerCAmelCase , mode=__lowerCAmelCase , headers=get_authentication_headers_for_url(__lowerCAmelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
def A ( self , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
self._get_dirs()
__magic_name__ :str = self._strip_protocol(__lowerCAmelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__lowerCAmelCase )
def A ( self , __lowerCAmelCase , __lowerCAmelCase=False , **__lowerCAmelCase ):
"""simple docstring"""
self._get_dirs()
__magic_name__ :Union[str, Any] = PurePosixPath(path.strip('''/''' ) )
__magic_name__ :Dict = {}
for p, f in self.dir_cache.items():
__magic_name__ :int = PurePosixPath(p.strip('''/''' ) )
__magic_name__ :Tuple = p.parent
if root == path:
__magic_name__ :Optional[Any] = f
__magic_name__ :List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f['''name'''] for f in out )
| 0 | 1 |
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A__ ( __magic_name__ ):
lowercase = (KDPMaDiscreteScheduler,)
lowercase = 10
def _lowerCamelCase ( self : Tuple , **a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = {
'num_train_timesteps': 1_100,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
}
config.update(**a )
return config
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=a , beta_end=a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = self.scheduler_classes[0]
lowerCAmelCase__ : List[Any] = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCAmelCase__ : Any = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Any = self.dummy_model()
lowerCAmelCase__ : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : List[Any] = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : Tuple = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : int = model(a , a )
lowerCAmelCase__ : Union[str, Any] = scheduler.step(a , a , a )
lowerCAmelCase__ : Optional[int] = output.prev_sample
lowerCAmelCase__ : Any = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : int = torch.mean(torch.abs(a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693428650170972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0_0_0_2 ) < 1E-3
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
if torch_device == "mps":
return
lowerCAmelCase__ : Any = self.scheduler_classes[0]
lowerCAmelCase__ : Any = self.get_scheduler_config()
lowerCAmelCase__ : str = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : List[Any] = self.dummy_model()
lowerCAmelCase__ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : List[Any] = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : List[Any] = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : Optional[int] = model(a , a )
lowerCAmelCase__ : Optional[int] = scheduler.step(a , a , a )
lowerCAmelCase__ : Optional[int] = output.prev_sample
lowerCAmelCase__ : Tuple = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : str = torch.mean(torch.abs(a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1E-3
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
if torch_device == "mps":
return
lowerCAmelCase__ : Any = self.scheduler_classes[0]
lowerCAmelCase__ : Tuple = self.get_scheduler_config()
lowerCAmelCase__ : Optional[Any] = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
lowerCAmelCase__ : List[Any] = self.dummy_model()
lowerCAmelCase__ : Optional[Any] = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCAmelCase__ : List[Any] = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : str = model(a , a )
lowerCAmelCase__ : Dict = scheduler.step(a , a , a )
lowerCAmelCase__ : str = output.prev_sample
lowerCAmelCase__ : Optional[int] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Optional[int] = torch.mean(torch.abs(a ) )
if str(a ).startswith('cpu' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1E-3 | 69 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
lowerCamelCase__ = None
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase__ = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
"""tokenizer_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""",
},
}
lowerCamelCase__ = {
"""google/rembert""": 256,
}
lowerCamelCase__ = """▁"""
class A__ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = RemBertTokenizer
def __init__( self : Optional[Any] , a : str=None , a : Any=None , a : List[Any]=True , a : str=True , a : Dict=False , a : Dict="[CLS]" , a : int="[SEP]" , a : Tuple="<unk>" , a : Optional[Any]="[SEP]" , a : Tuple="<pad>" , a : Dict="[CLS]" , a : Optional[Any]="[MASK]" , **a : str , ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
super().__init__(
a , tokenizer_file=a , do_lower_case=a , remove_space=a , keep_accents=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , **a , )
lowerCAmelCase__ : int = do_lower_case
lowerCAmelCase__ : int = remove_space
lowerCAmelCase__ : List[Any] = keep_accents
lowerCAmelCase__ : Optional[Any] = vocab_file
lowerCAmelCase__ : Union[str, Any] = False if not self.vocab_file else True
def _lowerCamelCase ( self : Dict , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowerCAmelCase__ : Dict = [self.sep_token_id]
lowerCAmelCase__ : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowerCamelCase ( self : str , a : List[int] , a : Optional[List[int]] = None , a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(a )) + [1] + ([0] * len(a )) + [1]
return [1] + ([0] * len(a )) + [1]
def _lowerCamelCase ( self : List[Any] , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = [self.sep_token_id]
lowerCAmelCase__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCamelCase ( self : Tuple , a : str , a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(a ):
logger.error('Vocabulary path ({}) should be a directory'.format(a ) )
return
lowerCAmelCase__ : int = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ):
copyfile(self.vocab_file , a )
return (out_vocab_file,) | 69 | 1 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Tuple:
"""simple docstring"""
A__ = 0
if start < end:
A__ = randint(lowercase_ , lowercase_ )
A__ = a[end]
A__ = a[pivot]
A__ = temp
A__ , A__ = _in_place_partition(lowercase_ , lowercase_ , lowercase_ )
count += _in_place_quick_sort(lowercase_ , lowercase_ , p - 1 )
count += _in_place_quick_sort(lowercase_ , p + 1 , lowercase_ )
return count
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Any:
"""simple docstring"""
A__ = 0
A__ = randint(lowercase_ , lowercase_ )
A__ = a[end]
A__ = a[pivot]
A__ = temp
A__ = start - 1
for index in range(lowercase_ , lowercase_ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
A__ = new_pivot_index + 1
A__ = a[new_pivot_index]
A__ = a[index]
A__ = temp
A__ = a[new_pivot_index + 1]
A__ = a[end]
A__ = temp
return new_pivot_index + 1, count
_lowerCamelCase : Optional[Any] = TemporaryFile()
_lowerCamelCase : str = 100 # 1000 elements are to be sorted
_lowerCamelCase , _lowerCamelCase : List[str] = 0, 1 # mean and standard deviation
_lowerCamelCase : Union[str, Any] = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0) # using the same array
_lowerCamelCase : str = np.load(outfile)
_lowerCamelCase : List[Any] = len(M) - 1
_lowerCamelCase : List[str] = _in_place_quick_sort(M, 0, r)
print(
"""No of Comparisons for 100 elements selected from a standard normal distribution"""
"""is :"""
)
print(z)
| 87 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ : int = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17 | 0 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
UpperCamelCase_ : Tuple = random.Random()
def _lowerCAmelCase (_lowercase , _lowercase=1.0 , _lowercase=None , _lowercase=None ):
if rng is None:
a__ = global_rng
a__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict ,a__ : List[str] ,a__ : Dict=7 ,a__ : List[str]=4_00 ,a__ : List[str]=20_00 ,a__ : Any=1 ,a__ : List[Any]=0.0 ,a__ : str=1_60_00 ,a__ : Dict=True ,a__ : Optional[Any]=80 ,a__ : List[Any]=16 ,a__ : Any=64 ,a__ : str="hann_window" ,a__ : Tuple=80 ,a__ : Optional[Any]=76_00 ,a__ : List[Any]=1e-10 ,a__ : str=True ,):
a__ = parent
a__ = batch_size
a__ = min_seq_length
a__ = max_seq_length
a__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
a__ = feature_size
a__ = padding_value
a__ = sampling_rate
a__ = do_normalize
a__ = num_mel_bins
a__ = hop_length
a__ = win_length
a__ = win_function
a__ = fmin
a__ = fmax
a__ = mel_floor
a__ = return_attention_mask
def lowerCAmelCase_ ( self : List[Any] ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def lowerCAmelCase_ ( self : Tuple ,a__ : Optional[int]=False ,a__ : Tuple=False ):
def _flatten(a__ : Tuple ):
return list(itertools.chain(*_a ) )
if equal_length:
a__ = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
a__ = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
]
if numpify:
a__ = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
def lowerCAmelCase_ ( self : Dict ,a__ : List[Any]=False ,a__ : List[str]=False ):
if equal_length:
a__ = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
a__ = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
]
if numpify:
a__ = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
class lowerCamelCase__ ( UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ = SpeechTaFeatureExtractor
def lowerCAmelCase_ ( self : List[Any] ):
a__ = SpeechTaFeatureExtractionTester(self )
def lowerCAmelCase_ ( self : Optional[int] ,a__ : Optional[int] ):
self.assertTrue(np.all(np.mean(_a ,axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_a ,axis=0 ) - 1 ) < 1e-3 ) )
def lowerCAmelCase_ ( self : Any ):
# Tests that all call wrap to encode_plus and batch_encode_plus
a__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a__ = [floats_list((1, x) )[0] for x in range(8_00 ,14_00 ,2_00 )]
a__ = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
a__ = feat_extract(speech_inputs[0] ,return_tensors="np" ).input_values
a__ = feat_extract(np_speech_inputs[0] ,return_tensors="np" ).input_values
self.assertTrue(np.allclose(_a ,_a ,atol=1e-3 ) )
# Test batched
a__ = feat_extract(_a ,return_tensors="np" ).input_values
a__ = feat_extract(_a ,return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(_a ,_a ):
self.assertTrue(np.allclose(_a ,_a ,atol=1e-3 ) )
def lowerCAmelCase_ ( self : str ):
a__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__ = [floats_list((1, x) )[0] for x in range(8_00 ,14_00 ,2_00 )]
a__ = ["""longest""", """max_length""", """do_not_pad"""]
a__ = [None, 16_00, None]
for max_length, padding in zip(_a ,_a ):
a__ = feat_extract(_a ,padding=_a ,max_length=_a ,return_tensors="np" )
a__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self.assertTrue(input_values[0][8_00:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self.assertTrue(input_values[0][10_00:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def lowerCAmelCase_ ( self : Tuple ):
a__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__ = range(8_00 ,14_00 ,2_00 )
a__ = [floats_list((1, x) )[0] for x in lengths]
a__ = ["""longest""", """max_length""", """do_not_pad"""]
a__ = [None, 16_00, None]
for max_length, padding in zip(_a ,_a ):
a__ = feat_extract(_a ,max_length=_a ,padding=_a )
a__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def lowerCAmelCase_ ( self : Optional[Any] ):
a__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__ = [floats_list((1, x) )[0] for x in range(8_00 ,14_00 ,2_00 )]
a__ = feat_extract(
_a ,truncation=_a ,max_length=10_00 ,padding="max_length" ,return_tensors="np" )
a__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def lowerCAmelCase_ ( self : Tuple ):
a__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__ = [floats_list((1, x) )[0] for x in range(8_00 ,14_00 ,2_00 )]
a__ = feat_extract(
_a ,truncation=_a ,max_length=10_00 ,padding="longest" ,return_tensors="np" )
a__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00) )
a__ = [floats_list((1, x) )[0] for x in range(8_00 ,14_00 ,2_00 )]
a__ = feat_extract(
_a ,truncation=_a ,max_length=20_00 ,padding="longest" ,return_tensors="np" )
a__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
a__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__ = np.random.rand(1_00 ).astype(np.floataa )
a__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
a__ = feature_extractor.pad([{"input_values": inputs}] ,return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
a__ = feature_extractor.pad([{"input_values": inputs}] ,return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def lowerCAmelCase_ ( self : List[Any] ):
# Tests that all call wrap to encode_plus and batch_encode_plus
a__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a__ = [floats_list((1, x) )[0] for x in range(8_00 ,14_00 ,2_00 )]
a__ = [np.asarray(_a ) for speech_input in speech_inputs]
# Test feature size
a__ = feature_extractor(audio_target=_a ,padding=_a ,return_tensors="np" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
a__ = feature_extractor(speech_inputs[0] ,return_tensors="np" ).input_values
a__ = feature_extractor(np_speech_inputs[0] ,return_tensors="np" ).input_values
self.assertTrue(np.allclose(_a ,_a ,atol=1e-3 ) )
# Test batched
a__ = feature_extractor(_a ,return_tensors="np" ).input_values
a__ = feature_extractor(_a ,return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(_a ,_a ):
self.assertTrue(np.allclose(_a ,_a ,atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
a__ = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
a__ = np.asarray(_a )
a__ = feature_extractor(_a ,return_tensors="np" ).input_values
a__ = feature_extractor(_a ,return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(_a ,_a ):
self.assertTrue(np.allclose(_a ,_a ,atol=1e-3 ) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
a__ = self.feat_extract_tester.prepare_inputs_for_target()
a__ = self.feature_extraction_class(**self.feat_extract_dict )
a__ = feat_extract.model_input_names[0]
a__ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_a ) == len(_a ) for x, y in zip(_a ,processed_features[input_name] ) ) )
a__ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_a )
a__ = BatchFeature({input_name: speech_inputs} ,tensor_type="np" )
a__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def lowerCAmelCase_ ( self : Union[str, Any] ):
a__ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_a )
a__ = self.feature_extraction_class(**self.feat_extract_dict )
a__ = feat_extract.model_input_names[0]
a__ = BatchFeature({input_name: speech_inputs} ,tensor_type="pt" )
a__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def lowerCAmelCase_ ( self : str ):
a__ = self.feature_extraction_class(**self.feat_extract_dict )
a__ = self.feat_extract_tester.prepare_inputs_for_target()
a__ = feat_extract.model_input_names[0]
a__ = BatchFeature({input_name: speech_inputs} )
a__ = feat_extract.num_mel_bins # hack!
a__ = feat_extract.pad(_a ,padding="longest" ,return_tensors="np" )[input_name]
a__ = feat_extract.pad(_a ,padding="longest" ,return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def lowerCAmelCase_ ( self : Any ):
a__ = self.feat_extract_dict
a__ = True
a__ = self.feature_extraction_class(**_a )
a__ = self.feat_extract_tester.prepare_inputs_for_target()
a__ = [len(_a ) for x in speech_inputs]
a__ = feat_extract.model_input_names[0]
a__ = BatchFeature({input_name: speech_inputs} )
a__ = feat_extract.num_mel_bins # hack!
a__ = feat_extract.pad(_a ,padding="longest" ,return_tensors="np" )
self.assertIn("attention_mask" ,_a )
self.assertListEqual(list(processed.attention_mask.shape ) ,list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() ,_a )
def lowerCAmelCase_ ( self : Union[str, Any] ):
a__ = self.feat_extract_dict
a__ = True
a__ = self.feature_extraction_class(**_a )
a__ = self.feat_extract_tester.prepare_inputs_for_target()
a__ = [len(_a ) for x in speech_inputs]
a__ = feat_extract.model_input_names[0]
a__ = BatchFeature({input_name: speech_inputs} )
a__ = min(_a )
a__ = feat_extract.num_mel_bins # hack!
a__ = feat_extract.pad(
_a ,padding="max_length" ,max_length=_a ,truncation=_a ,return_tensors="np" )
self.assertIn("attention_mask" ,_a )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) ,[processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() ,[max_length for x in speech_inputs] )
def lowerCAmelCase_ ( self : Optional[Any] ,a__ : Optional[int] ):
from datasets import load_dataset
a__ = load_dataset("hf-internal-testing/librispeech_asr_dummy" ,"clean" ,split="validation" )
# automatic decoding with librispeech
a__ = ds.sort("id" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def lowerCAmelCase_ ( self : Union[str, Any] ):
# fmt: off
a__ = torch.tensor(
[2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03] )
# fmt: on
a__ = self._load_datasamples(1 )
a__ = SpeechTaFeatureExtractor()
a__ = feature_extractor(_a ,return_tensors="pt" ).input_values
self.assertEquals(input_values.shape ,(1, 9_36_80) )
self.assertTrue(torch.allclose(input_values[0, :30] ,_a ,atol=1e-6 ) )
def lowerCAmelCase_ ( self : Optional[Any] ):
# fmt: off
a__ = torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
# fmt: on
a__ = self._load_datasamples(1 )
a__ = SpeechTaFeatureExtractor()
a__ = feature_extractor(audio_target=_a ,return_tensors="pt" ).input_values
self.assertEquals(input_values.shape ,(1, 3_66, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] ,_a ,atol=1e-4 ) )
| 710 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ : Tuple = {
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ : Optional[int] = [
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
UpperCamelCase_ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 394 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def snake_case ( __a ):
raise NotImplementedError()
@abstractmethod
def snake_case ( self ):
raise NotImplementedError()
| 636 |
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Tuple =(KDPMaDiscreteScheduler,)
__UpperCAmelCase : Optional[Any] =1_0
def snake_case ( self , **__a ):
__lowerCAmelCase = {
"num_train_timesteps": 11_00,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
}
config.update(**__a )
return config
def snake_case ( self ):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__a )
def snake_case ( self ):
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=__a , beta_end=__a )
def snake_case ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a )
def snake_case ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a )
def snake_case ( self ):
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config(prediction_type="v_prediction" )
__lowerCAmelCase = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowerCAmelCase = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase = scheduler.scale_model_input(__a , __a )
__lowerCAmelCase = model(__a , __a )
__lowerCAmelCase = scheduler.step(__a , __a , __a )
__lowerCAmelCase = output.prev_sample
__lowerCAmelCase = torch.sum(torch.abs(__a ) )
__lowerCAmelCase = torch.mean(torch.abs(__a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2
assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2
assert abs(result_mean.item() - 0.0_0_0_2 ) < 1e-3
def snake_case ( self ):
if torch_device == "mps":
return
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowerCAmelCase = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase = scheduler.scale_model_input(__a , __a )
__lowerCAmelCase = model(__a , __a )
__lowerCAmelCase = scheduler.step(__a , __a , __a )
__lowerCAmelCase = output.prev_sample
__lowerCAmelCase = torch.sum(torch.abs(__a ) )
__lowerCAmelCase = torch.mean(torch.abs(__a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
def snake_case ( self ):
if torch_device == "mps":
return
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps , device=__a )
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
__lowerCAmelCase = scheduler.scale_model_input(__a , __a )
__lowerCAmelCase = model(__a , __a )
__lowerCAmelCase = scheduler.step(__a , __a , __a )
__lowerCAmelCase = output.prev_sample
__lowerCAmelCase = torch.sum(torch.abs(__a ) )
__lowerCAmelCase = torch.mean(torch.abs(__a ) )
if str(__a ).startswith("cpu" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
| 636 | 1 |
'''simple docstring'''
import re
import subprocess
import sys
UpperCamelCase__ : Union[str, Any] = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
UpperCamelCase__ : str = (
subprocess.check_output(f'git diff --diff-filter=d --name-only {fork_point_sha}'.split()).decode('utf-8').split()
)
UpperCamelCase__ : str = '|'.join(sys.argv[1:])
UpperCamelCase__ : str = re.compile(rf'^({joined_dirs}).*?\.py$')
UpperCamelCase__ : Union[str, Any] = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 711 |
'''simple docstring'''
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = r'\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n'
class _lowerCAmelCase ( __A ):
"""simple docstring"""
@add_start_docstrings(_lowerCamelCase )
def __call__( self , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) -> bool:
raise NotImplementedError("""StoppingCriteria needs to be subclassed""" )
class _lowerCAmelCase ( __A ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase = None ) -> List[Any]:
A_ : Optional[Any] = max_length
A_ : List[str] = max_position_embeddings
@add_start_docstrings(_lowerCamelCase )
def __call__( self , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) -> bool:
A_ : Dict = input_ids.shape[-1]
A_ : Optional[int] = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"""This is a friendly reminder - the current text generation call will exceed the model's predefined """
F"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
"""exceptions, performance degradation, or nothing at all.""" )
return is_done
class _lowerCAmelCase ( __A ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
warnings.warn(
"""The class `MaxNewTokensCriteria` is deprecated. """
F"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
"""with `max_length = start_length + max_new_tokens` instead.""" , _lowerCamelCase , )
A_ : Dict = start_length
A_ : List[Any] = max_new_tokens
A_ : Optional[int] = start_length + max_new_tokens
@add_start_docstrings(_lowerCamelCase )
def __call__( self , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) -> bool:
return input_ids.shape[-1] >= self.max_length
class _lowerCAmelCase ( __A ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase = None ) -> List[Any]:
A_ : List[Any] = max_time
A_ : Dict = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(_lowerCamelCase )
def __call__( self , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) -> bool:
return time.time() - self.initial_timestamp > self.max_time
class _lowerCAmelCase ( __A ):
"""simple docstring"""
@add_start_docstrings(_lowerCamelCase )
def __call__( self , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) -> bool:
return any(criteria(_lowerCamelCase , _lowerCamelCase ) for criteria in self )
@property
def UpperCAmelCase_ ( self ) -> Optional[int]:
for stopping_criterium in self:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
return stopping_criterium.max_length
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
return stopping_criterium.max_length
return None
def UpperCAmelCase ( a_ , a_ ) -> StoppingCriteriaList:
"""simple docstring"""
A_ : Optional[Any] = stopping_criteria.max_length
A_ : Dict = deepcopy(a_ )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("""You set different `max_length` for stopping criteria and `max_length` parameter""" , a_ )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=a_ ) )
return new_stopping_criteria
| 385 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _UpperCAmelCase ( __lowercase ,unittest.TestCase ):
'''simple docstring'''
a__ =XLMTokenizer
a__ =False
def __lowerCAmelCase ( self ) -> Dict:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCAmelCase : int = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
_UpperCAmelCase : List[str] = dict(zip(__a , range(len(__a ) ) ) )
_UpperCAmelCase : Tuple = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
_UpperCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(__a ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(__a ) )
def __lowerCAmelCase ( self , A ) -> Optional[int]:
_UpperCAmelCase : Optional[Any] = """lower newer"""
_UpperCAmelCase : int = """lower newer"""
return input_text, output_text
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Dict = XLMTokenizer(self.vocab_file , self.merges_file )
_UpperCAmelCase : Dict = """lower"""
_UpperCAmelCase : List[str] = ["""low""", """er</w>"""]
_UpperCAmelCase : Tuple = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
_UpperCAmelCase : List[str] = tokens + ["""<unk>"""]
_UpperCAmelCase : Union[str, Any] = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
@slow
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : List[str] = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )
_UpperCAmelCase : Union[str, Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__a )
_UpperCAmelCase : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__a )
_UpperCAmelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(__a )
_UpperCAmelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 506 |
"""simple docstring"""
from __future__ import annotations
def snake_case ( UpperCamelCase__ : list[int] , UpperCamelCase__ : int ) -> list[list[int]]:
lowerCamelCase : list[list[int]] = []
lowerCamelCase : list[int] = []
lowerCamelCase : Union[str, Any] = 0
lowerCamelCase : Dict = sum(UpperCamelCase__ )
create_state_space_tree(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return result
def snake_case ( UpperCamelCase__ : list[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : list[int] , UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : int , ) -> None:
if sum(UpperCamelCase__ ) > max_sum or (remaining_nums_sum + sum(UpperCamelCase__ )) < max_sum:
return
if sum(UpperCamelCase__ ) == max_sum:
result.append(UpperCamelCase__ )
return
for index in range(UpperCamelCase__ , len(UpperCamelCase__ ) ):
create_state_space_tree(
UpperCamelCase__ , UpperCamelCase__ , index + 1 , [*path, nums[index]] , UpperCamelCase__ , remaining_nums_sum - nums[index] , )
__lowerCamelCase :Dict = [3, 34, 4, 12, 5, 2]
__lowerCamelCase :int = 9
__lowerCamelCase :Union[str, Any] = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 222 | 0 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
_lowercase : List[Any] =logging.get_logger(__name__)
_lowercase : Union[str, Any] ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_lowercase : Tuple ={
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_lowercase : Union[str, Any] ={
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_lowercase : List[str] ={
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
_lowercase : Union[str, Any] ={
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
_lowercase : List[Any] ={
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
_lowercase : List[Any] ={
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
_lowercase : Union[str, Any] ={
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
_lowercase : Optional[Any] ={
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
_lowercase : Dict ={
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class _SCREAMING_SNAKE_CASE (lowercase__ ):
A__ = VOCAB_FILES_NAMES
A__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
A__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
A__ = DPRContextEncoderTokenizer
class _SCREAMING_SNAKE_CASE (lowercase__ ):
A__ = VOCAB_FILES_NAMES
A__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
A__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
A__ = DPRQuestionEncoderTokenizer
_lowercase : Union[str, Any] =collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
_lowercase : int =collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
_lowercase : Dict =R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(lowercase__ )
class _SCREAMING_SNAKE_CASE :
def __call__( self : List[str] , __UpperCamelCase : Any , __UpperCamelCase : Optional[str] = None , __UpperCamelCase : Optional[str] = None , __UpperCamelCase : Union[bool, str] = False , __UpperCamelCase : Union[bool, str] = False , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : Optional[bool] = None , **__UpperCamelCase : str , ) -> BatchEncoding:
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
elif titles is None or texts is None:
snake_case__ : Tuple = titles if texts is None else texts
return super().__call__(
__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
snake_case__ : Union[str, Any] = titles if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [titles]
snake_case__ : List[str] = texts if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [texts]
snake_case__ : Tuple = len(__UpperCamelCase )
snake_case__ : int = questions if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [questions] * n_passages
assert len(__UpperCamelCase ) == len(
__UpperCamelCase ), F'''There should be as many titles than texts but got {len(__UpperCamelCase )} titles and {len(__UpperCamelCase )} texts.'''
snake_case__ : Union[str, Any] = super().__call__(__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )['''input_ids''']
snake_case__ : Dict = super().__call__(__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )['''input_ids''']
snake_case__ : int = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__UpperCamelCase , __UpperCamelCase )
]
}
if return_attention_mask is not False:
snake_case__ : Dict = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
snake_case__ : str = attention_mask
return self.pad(__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase )
def lowerCAmelCase ( self : Optional[Any] , __UpperCamelCase : BatchEncoding , __UpperCamelCase : DPRReaderOutput , __UpperCamelCase : int = 16 , __UpperCamelCase : int = 64 , __UpperCamelCase : int = 4 , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
snake_case__ : str = reader_input['''input_ids''']
snake_case__ : Union[str, Any] = reader_output[:3]
snake_case__ : List[Any] = len(__UpperCamelCase )
snake_case__ : Any = sorted(range(__UpperCamelCase ) , reverse=__UpperCamelCase , key=relevance_logits.__getitem__ )
snake_case__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
snake_case__ : Any = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
snake_case__ : List[str] = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
snake_case__ : Optional[Any] = sequence_ids.index(self.pad_token_id )
else:
snake_case__ : Optional[int] = len(__UpperCamelCase )
snake_case__ : Tuple = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__UpperCamelCase , top_spans=__UpperCamelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__UpperCamelCase , start_index=__UpperCamelCase , end_index=__UpperCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(__UpperCamelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowerCAmelCase ( self : List[str] , __UpperCamelCase : List[int] , __UpperCamelCase : List[int] , __UpperCamelCase : int , __UpperCamelCase : int , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
snake_case__ : Optional[int] = []
for start_index, start_score in enumerate(__UpperCamelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
snake_case__ : Tuple = sorted(__UpperCamelCase , key=lambda __UpperCamelCase : x[1] , reverse=__UpperCamelCase )
snake_case__ : Tuple = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F'''Wrong span indices: [{start_index}:{end_index}]'''
snake_case__ : int = end_index - start_index + 1
assert length <= max_answer_length, F'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(__UpperCamelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(lowercase__ )
class _SCREAMING_SNAKE_CASE (lowercase__, lowercase__ ):
A__ = VOCAB_FILES_NAMES
A__ = READER_PRETRAINED_VOCAB_FILES_MAP
A__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = READER_PRETRAINED_INIT_CONFIGURATION
A__ = ['input_ids', 'attention_mask']
A__ = DPRReaderTokenizer
| 712 |
'''simple docstring'''
def __UpperCAmelCase ( UpperCamelCase__ :int ) -> bool:
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 574 | 0 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
_SCREAMING_SNAKE_CASE : Optional[int] = 'Usage of script: script_name <size_of_canvas:int>'
_SCREAMING_SNAKE_CASE : List[str] = [0] * 100 + [1] * 10
random.shuffle(choice)
def __lowerCAmelCase ( __magic_name__ ):
_lowercase: str = [[False for i in range(__magic_name__ )] for j in range(__magic_name__ )]
return canvas
def __lowerCAmelCase ( __magic_name__ ):
for i, row in enumerate(__magic_name__ ):
for j, _ in enumerate(__magic_name__ ):
_lowercase: List[Any] = bool(random.getrandbits(1 ) )
def __lowerCAmelCase ( __magic_name__ ):
_lowercase: Optional[int] = np.array(__magic_name__ )
_lowercase: Optional[int] = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(__magic_name__ ):
for c, pt in enumerate(__magic_name__ ):
_lowercase: Dict = __judge_point(
__magic_name__ , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
_lowercase: Tuple = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
_lowercase: list[list[bool]] = current_canvas.tolist()
return return_canvas
def __lowerCAmelCase ( __magic_name__ , __magic_name__ ):
_lowercase: Union[str, Any] = 0
_lowercase: str = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
_lowercase: Optional[int] = pt
if pt:
if alive < 2:
_lowercase: Union[str, Any] = False
elif alive == 2 or alive == 3:
_lowercase: Optional[Any] = True
elif alive > 3:
_lowercase: List[Any] = False
else:
if alive == 3:
_lowercase: List[Any] = True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
_SCREAMING_SNAKE_CASE : Tuple = int(sys.argv[1])
# main working structure of this module.
_SCREAMING_SNAKE_CASE : Any = create_canvas(canvas_size)
seed(c)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = plt.subplots()
fig.show()
_SCREAMING_SNAKE_CASE : str = ListedColormap(['w', 'k'])
try:
while True:
_SCREAMING_SNAKE_CASE : Dict = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 226 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class A :
'''simple docstring'''
def __init__( self : List[str] , _UpperCamelCase : Tuple , _UpperCamelCase : List[str]=13 , _UpperCamelCase : Union[str, Any]=7 , _UpperCamelCase : List[str]=True , _UpperCamelCase : Tuple=True , _UpperCamelCase : Dict=True , _UpperCamelCase : Any=True , _UpperCamelCase : Union[str, Any]=99 , _UpperCamelCase : Tuple=32 , _UpperCamelCase : List[Any]=5 , _UpperCamelCase : Tuple=4 , _UpperCamelCase : List[Any]=4 , _UpperCamelCase : Tuple="gelu" , _UpperCamelCase : Any=0.0 , _UpperCamelCase : List[Any]=0.1 , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Tuple=512 , _UpperCamelCase : str=16 , _UpperCamelCase : str=2 , _UpperCamelCase : Tuple=0.0_2 , _UpperCamelCase : List[str]=3 , _UpperCamelCase : int=4 , _UpperCamelCase : Tuple=None , ):
_lowercase: Any = parent
_lowercase: int = batch_size
_lowercase: Tuple = seq_length
_lowercase: Any = is_training
_lowercase: Any = use_input_mask
_lowercase: Union[str, Any] = use_token_type_ids
_lowercase: int = use_labels
_lowercase: int = vocab_size
_lowercase: int = hidden_size
_lowercase: Any = num_hidden_layers
_lowercase: Tuple = num_attention_heads
_lowercase: List[str] = intermediate_multiple_size
_lowercase: Dict = hidden_act
_lowercase: Optional[int] = hidden_dropout
_lowercase: Optional[int] = attention_dropout
_lowercase: Dict = weight_tying
_lowercase: Union[str, Any] = max_position_embeddings
_lowercase: str = type_vocab_size
_lowercase: str = type_sequence_label_size
_lowercase: Optional[int] = initializer_range
_lowercase: List[Any] = num_labels
_lowercase: Any = num_choices
_lowercase: Optional[Any] = scope
def UpperCAmelCase__ ( self : Optional[Any]):
_lowercase: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_lowercase: Optional[Any] = None
if self.use_input_mask:
_lowercase: List[str] = random_attention_mask([self.batch_size, self.seq_length])
_lowercase: Dict = None
if self.use_labels:
_lowercase: Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_lowercase: Optional[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCAmelCase__ ( self : Optional[int]):
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : int):
_lowercase , _lowercase , _lowercase , _lowercase: int = self.prepare_config_and_inputs()
_lowercase: str = True
return config, input_ids, input_mask, token_labels
def UpperCAmelCase__ ( self : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict):
_lowercase: Dict = GPTNeoXJapaneseModel(config=_UpperCamelCase)
model.to(_UpperCamelCase)
model.eval()
_lowercase: Optional[Any] = model(_UpperCamelCase , attention_mask=_UpperCamelCase)
_lowercase: Optional[int] = model(_UpperCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Any):
_lowercase: Tuple = True
_lowercase: Optional[int] = GPTNeoXJapaneseModel(_UpperCamelCase)
model.to(_UpperCamelCase)
model.eval()
_lowercase: int = model(_UpperCamelCase , attention_mask=_UpperCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self : Union[str, Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str):
_lowercase: Union[str, Any] = GPTNeoXJapaneseForCausalLM(config=_UpperCamelCase)
model.to(_UpperCamelCase)
model.eval()
_lowercase: Optional[int] = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase__ ( self : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any):
_lowercase: Tuple = True
_lowercase: Optional[Any] = GPTNeoXJapaneseForCausalLM(config=_UpperCamelCase)
model.to(_UpperCamelCase)
model.eval()
# first forward pass
_lowercase: int = model(_UpperCamelCase , attention_mask=_UpperCamelCase , use_cache=_UpperCamelCase)
_lowercase: str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_lowercase: List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size)
_lowercase: Union[str, Any] = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
_lowercase: List[Any] = torch.cat([input_ids, next_tokens] , dim=-1)
_lowercase: List[Any] = torch.cat([input_mask, next_mask] , dim=-1)
_lowercase: Union[str, Any] = model(_UpperCamelCase , attention_mask=_UpperCamelCase , output_hidden_states=_UpperCamelCase)
_lowercase: Optional[int] = output_from_no_past["hidden_states"][0]
_lowercase: Tuple = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , output_hidden_states=_UpperCamelCase , )["hidden_states"][0]
# select random slice
_lowercase: Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1]).item()
_lowercase: Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowercase: Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3))
def UpperCAmelCase__ ( self : Dict):
_lowercase: Union[str, Any] = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase , _lowercase: Tuple = config_and_inputs
_lowercase: Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class A ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase : List[str] = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
lowerCamelCase : Optional[int] = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
lowerCamelCase : List[Any] = (
{"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
lowerCamelCase : int = False
lowerCamelCase : Optional[int] = False
lowerCamelCase : int = False
lowerCamelCase : List[str] = False
def UpperCAmelCase__ ( self : Tuple):
_lowercase: Optional[int] = GPTNeoXJapaneseModelTester(self)
_lowercase: List[str] = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37)
def UpperCAmelCase__ ( self : Optional[Any]):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[str]):
_lowercase , _lowercase , _lowercase , _lowercase: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
def UpperCAmelCase__ ( self : Optional[int]):
_lowercase , _lowercase , _lowercase , _lowercase: int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
def UpperCAmelCase__ ( self : Optional[Any]):
# This regression test was failing with PyTorch < 1.3
_lowercase , _lowercase , _lowercase , _lowercase: Any = self.model_tester.prepare_config_and_inputs_for_decoder()
_lowercase: int = None
self.model_tester.create_and_check_model_as_decoder(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
def UpperCAmelCase__ ( self : Optional[Any]):
_lowercase , _lowercase , _lowercase , _lowercase: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
def UpperCAmelCase__ ( self : str):
_lowercase: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*_UpperCamelCase)
@slow
def UpperCAmelCase__ ( self : Any):
_lowercase: List[str] = "abeja/gpt-neox-japanese-2.7b"
_lowercase: Dict = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
_lowercase: Union[str, Any] = [
"データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
"100年後に必要とされる会社は、「人」が中心の会社です。",
"フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
"国境の長いトンネルを抜けると、そこは雪国だった。",
"美味しい日本食といえば、やっぱりお寿司ですよね。",
]
_lowercase: str = GPTNeoXJapaneseTokenizer.from_pretrained(_UpperCamelCase)
_lowercase: Dict = GPTNeoXJapaneseForCausalLM.from_pretrained(_UpperCamelCase)
_lowercase: List[Any] = []
for prompt in prompts:
_lowercase: List[str] = tokenizer(_UpperCamelCase , return_tensors="pt").input_ids
_lowercase: List[Any] = model.generate(_UpperCamelCase , max_length=50)
_lowercase: str = tokenizer.batch_decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase)
predicted_outputs += generated_string
self.assertListEqual(_UpperCamelCase , _UpperCamelCase)
| 226 | 1 |
"""simple docstring"""
def _lowercase ( _SCREAMING_SNAKE_CASE : int ) -> bool:
'''simple docstring'''
if num < 0:
return False
__A : int = num
__A : int = 0
while num > 0:
__A : Optional[Any] = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 | """simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : Tuple =logging.get_logger(__name__)
def _lowercase ( _SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]:
'''simple docstring'''
__A : List[str] = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )
if "model" in sd.keys():
__A : int = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )['model']
# pop unnecessary weights
__A : str = [
'decoder.version',
'decoder.output_projection.weight',
]
for key in keys_to_delete:
if key in sd:
sd.pop(_SCREAMING_SNAKE_CASE )
__A : List[str] = {
'decoder.project_in_dim.weight': 'decoder.project_in.weight',
'decoder.project_out_dim.weight': 'decoder.project_out.weight',
'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
__A : Any = sd.pop(_SCREAMING_SNAKE_CASE )
__A : Union[str, Any] = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
__A : Tuple = sd[key]
# We split QKV in separate Q,K,V
__A : Any = key.replace('.qkv_proj.' , '.q_proj.' )
__A : Any = key.replace('.qkv_proj.' , '.k_proj.' )
__A : Any = key.replace('.qkv_proj.' , '.v_proj.' )
__A : List[Any] = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
__A , __A , __A : List[str] = torch.split(_SCREAMING_SNAKE_CASE , depth // 3 , dim=0 )
__A : Optional[int] = q
__A : int = k
__A : List[str] = v
del sd[key]
return sd
@torch.no_grad()
def _lowercase ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int]=None ) -> List[str]:
'''simple docstring'''
__A : Dict = load_checkpoint(_SCREAMING_SNAKE_CASE )
if config is not None:
__A : Any = OPTConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
__A : Tuple = OPTConfig()
__A : Any = OPTModel(_SCREAMING_SNAKE_CASE ).half().eval()
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# Check results
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCamelCase : Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
lowerCamelCase : Dict =parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 237 | 0 |
def __UpperCamelCase ( lowerCAmelCase__ : int , lowerCAmelCase__ : float , lowerCAmelCase__ : float ):
return round(float(moles / volume ) * nfactor )
def __UpperCamelCase ( lowerCAmelCase__ : float , lowerCAmelCase__ : float , lowerCAmelCase__ : float ):
return round(float((moles * 0.08_21 * temperature) / (volume) ) )
def __UpperCamelCase ( lowerCAmelCase__ : float , lowerCAmelCase__ : float , lowerCAmelCase__ : float ):
return round(float((moles * 0.08_21 * temperature) / (pressure) ) )
def __UpperCamelCase ( lowerCAmelCase__ : float , lowerCAmelCase__ : float , lowerCAmelCase__ : float ):
return round(float((pressure * volume) / (0.08_21 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 521 |
from __future__ import annotations
def __UpperCamelCase ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict ): # noqa: E741
while r - l > 1:
__a : Tuple = (l + r) // 2
if v[m] >= key:
__a : Dict = m
else:
__a : Dict = m # noqa: E741
return r
def __UpperCamelCase ( lowerCAmelCase__ : list[int] ):
if len(lowerCAmelCase__ ) == 0:
return 0
__a : List[str] = [0] * len(lowerCAmelCase__ )
__a : Any = 1
__a : int = v[0]
for i in range(1 , len(lowerCAmelCase__ ) ):
if v[i] < tail[0]:
__a : Optional[Any] = v[i]
elif v[i] > tail[length - 1]:
__a : Optional[Any] = v[i]
length += 1
else:
__a : List[str] = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 521 | 1 |
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
A__ : int = logging.getLogger(__name__)
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = '''token-classification'''
def __init__( self , __UpperCamelCase )-> Union[str, Any]:
if type(__UpperCamelCase ) == dict:
UpperCAmelCase__ : int = Namespace(**__UpperCamelCase )
UpperCAmelCase__ : Dict = import_module("tasks" )
try:
UpperCAmelCase__ : Optional[int] = getattr(__UpperCamelCase , hparams.task_type )
UpperCAmelCase__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
F"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" )
UpperCAmelCase__ : List[Any] = self.token_classification_task.get_labels(hparams.labels )
UpperCAmelCase__ : Optional[int] = CrossEntropyLoss().ignore_index
super().__init__(__UpperCamelCase , len(self.labels ) , self.mode )
def lowerCAmelCase__ ( self , **__UpperCamelCase )-> Dict:
return self.model(**__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> List[Any]:
UpperCAmelCase__ : int = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase__ : Any = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
UpperCAmelCase__ : Union[str, Any] = self(**__UpperCamelCase )
UpperCAmelCase__ : List[str] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : str = self.hparams
for mode in ["train", "dev", "test"]:
UpperCAmelCase__ : Any = self._feature_file(__UpperCamelCase )
if os.path.exists(__UpperCamelCase ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , __UpperCamelCase )
UpperCAmelCase__ : str = torch.load(__UpperCamelCase )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
UpperCAmelCase__ : List[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = self.token_classification_task.convert_examples_to_features(
__UpperCamelCase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__UpperCamelCase , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("Saving features into cached file %s" , __UpperCamelCase )
torch.save(__UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False )-> DataLoader:
UpperCAmelCase__ : Optional[Any] = self._feature_file(__UpperCamelCase )
logger.info("Loading features from cached file %s" , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = torch.load(__UpperCamelCase )
UpperCAmelCase__ : Tuple = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCAmelCase__ : Optional[int] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
UpperCAmelCase__ : str = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
UpperCAmelCase__ : Optional[Any] = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
UpperCAmelCase__ : Optional[int] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , batch_size=__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> str:
"""Compute validation""" ""
UpperCAmelCase__ : Tuple = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase__ : List[Any] = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
UpperCAmelCase__ : Dict = self(**__UpperCamelCase )
UpperCAmelCase__ : str = outputs[:2]
UpperCAmelCase__ : str = logits.detach().cpu().numpy()
UpperCAmelCase__ : Optional[int] = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : List[Any] = torch.stack([x["val_loss"] for x in outputs] ).mean()
UpperCAmelCase__ : Optional[Any] = np.concatenate([x["pred"] for x in outputs] , axis=0 )
UpperCAmelCase__ : int = np.argmax(__UpperCamelCase , axis=2 )
UpperCAmelCase__ : Any = np.concatenate([x["target"] for x in outputs] , axis=0 )
UpperCAmelCase__ : Dict = dict(enumerate(self.labels ) )
UpperCAmelCase__ : List[Any] = [[] for _ in range(out_label_ids.shape[0] )]
UpperCAmelCase__ : List[Any] = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
UpperCAmelCase__ : List[Any] = {
"val_loss": val_loss_mean,
"accuracy_score": accuracy_score(__UpperCamelCase , __UpperCamelCase ),
"precision": precision_score(__UpperCamelCase , __UpperCamelCase ),
"recall": recall_score(__UpperCamelCase , __UpperCamelCase ),
"f1": fa_score(__UpperCamelCase , __UpperCamelCase ),
}
UpperCAmelCase__ : Union[str, Any] = dict(results.items() )
UpperCAmelCase__ : Dict = results
return ret, preds_list, out_label_list
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Any:
# when stable
UpperCAmelCase__ : Optional[Any] = self._eval_end(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
# updating to test_epoch_end instead of deprecated test_end
UpperCAmelCase__ : List[str] = self._eval_end(__UpperCamelCase )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
UpperCAmelCase__ : List[str] = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def lowerCAmelCase__ ( __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
# Add NER specific options
BaseTransformer.add_model_specific_args(__UpperCamelCase , __UpperCamelCase )
parser.add_argument(
"--task_type" , default="NER" , type=__UpperCamelCase , help="Task type to fine tune in training (e.g. NER, POS, etc)" )
parser.add_argument(
"--max_seq_length" , default=1_28 , type=__UpperCamelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--labels" , default="" , type=__UpperCamelCase , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , )
parser.add_argument(
"--gpus" , default=0 , type=__UpperCamelCase , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
if __name__ == "__main__":
A__ : Optional[Any] = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
A__ : str = NERTransformer.add_model_specific_args(parser, os.getcwd())
A__ : Dict = parser.parse_args()
A__ : Union[str, Any] = NERTransformer(args)
A__ : str = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
A__ : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
A__ : str = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 704 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=30 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=10 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=0.6 , __UpperCamelCase=None , )-> List[Any]:
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Optional[Any] = batch_size
UpperCAmelCase__ : Any = image_size
UpperCAmelCase__ : Dict = patch_size
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Union[str, Any] = is_training
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : int = mask_ratio
UpperCAmelCase__ : Tuple = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCAmelCase__ : int = (image_size // patch_size) ** 2
UpperCAmelCase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self )-> int:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = ViTMAEModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : List[Any] = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : Optional[int] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCAmelCase__ : List[str] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase__ : Dict = 1
UpperCAmelCase__ : str = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = config_and_inputs
UpperCAmelCase__ : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_A = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
_A = False
_A = False
_A = False
_A = False
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Any = ViTMAEModelTester(self )
UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def lowerCAmelCase__ ( self )-> int:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def lowerCAmelCase__ ( self )-> Dict:
pass
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class(__UpperCamelCase )
UpperCAmelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Dict = [*signature.parameters.keys()]
UpperCAmelCase__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Dict:
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCAmelCase__ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase__ : str = torch.from_numpy(__UpperCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase__ : Optional[Any] = pt_noise
super().check_pt_tf_models(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[Any] = outputs[0].cpu().numpy()
UpperCAmelCase__ : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = model_class.from_pretrained(__UpperCamelCase )
model.to(__UpperCamelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
# Make sure we don't have nans
UpperCAmelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__UpperCamelCase , 1E-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> List[str]:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Any:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Optional[Any]:
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def lowerCAmelCase__ ( self )-> List[Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
pass
@slow
def lowerCAmelCase__ ( self )-> Union[str, Any]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = ViTMAEModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self )-> List[Any]:
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self )-> Optional[int]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCAmelCase__ : Any = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__UpperCamelCase )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : Optional[Any] = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase__ : List[Any] = ViTMAEConfig()
UpperCAmelCase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase__ : Optional[int] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**__UpperCamelCase , noise=torch.from_numpy(__UpperCamelCase ).to(device=__UpperCamelCase ) )
# verify the logits
UpperCAmelCase__ : str = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__UpperCamelCase ) , atol=1E-4 ) )
| 660 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : int =logging.get_logger(__name__)
__lowerCAmelCase : Any ={
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class _A ( lowerCAmelCase , lowerCAmelCase ):
snake_case__ : Union[str, Any] = 'resnet'
snake_case__ : Union[str, Any] = ['basic', 'bottleneck']
def __init__( self , __lowerCAmelCase=3 , __lowerCAmelCase=64 , __lowerCAmelCase=[256, 512, 1024, 2048] , __lowerCAmelCase=[3, 4, 6, 3] , __lowerCAmelCase="bottleneck" , __lowerCAmelCase="relu" , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
if layer_type not in self.layer_types:
raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
lowercase = num_channels
lowercase = embedding_size
lowercase = hidden_sizes
lowercase = depths
lowercase = layer_type
lowercase = hidden_act
lowercase = downsample_in_first_stage
lowercase = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(__lowerCAmelCase ) + 1 )]
lowercase , lowercase = get_aligned_output_features_output_indices(
out_features=__lowerCAmelCase , out_indices=__lowerCAmelCase , stage_names=self.stage_names )
class _A ( lowerCAmelCase ):
snake_case__ : Optional[int] = version.parse('1.11' )
@property
def A__ ( self ):
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def A__ ( self ):
"""simple docstring"""
return 1E-3
| 359 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : int =logging.get_logger(__name__)
__lowerCAmelCase : Optional[Any] ={
"""uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class _A ( lowerCAmelCase ):
snake_case__ : Optional[int] = 'mra'
def __init__( self , __lowerCAmelCase=5_0265 , __lowerCAmelCase=768 , __lowerCAmelCase=12 , __lowerCAmelCase=12 , __lowerCAmelCase=3072 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=1 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1E-5 , __lowerCAmelCase="absolute" , __lowerCAmelCase=4 , __lowerCAmelCase="full" , __lowerCAmelCase=0 , __lowerCAmelCase=0 , __lowerCAmelCase=1 , __lowerCAmelCase=0 , __lowerCAmelCase=2 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
lowercase = vocab_size
lowercase = max_position_embeddings
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = initializer_range
lowercase = type_vocab_size
lowercase = layer_norm_eps
lowercase = position_embedding_type
lowercase = block_per_row
lowercase = approx_mode
lowercase = initial_prior_first_n_blocks
lowercase = initial_prior_diagonal_n_blocks
| 359 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase_ ( snake_case_ ):
_lowerCAmelCase : int = ['image_processor', 'tokenizer']
_lowerCAmelCase : Union[str, Any] = 'LayoutLMv3ImageProcessor'
_lowerCAmelCase : List[Any] = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self : str , lowerCAmelCase__ : int=None , lowerCAmelCase__ : str=None , **lowerCAmelCase__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE : List[str] = kwargs.pop('''feature_extractor''' )
SCREAMING_SNAKE_CASE : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
def __call__( self : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCAmelCase__ : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , lowerCAmelCase__ : Union[List[List[int]], List[List[List[int]]]] = None , lowerCAmelCase__ : Optional[Union[List[int], List[List[int]]]] = None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Union[bool, str, PaddingStrategy] = False , lowerCAmelCase__ : Union[bool, str, TruncationStrategy] = None , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , **lowerCAmelCase__ : Any , ):
"""simple docstring"""
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
# first, apply the image processor
SCREAMING_SNAKE_CASE : List[Any] = self.image_processor(images=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
SCREAMING_SNAKE_CASE : Optional[Any] = features['''words''']
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , stride=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_overflowing_tokens=lowerCAmelCase__ , return_special_tokens_mask=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , return_length=lowerCAmelCase__ , verbose=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , )
# add pixel values
SCREAMING_SNAKE_CASE : List[Any] = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
SCREAMING_SNAKE_CASE : Any = self.get_overflowing_images(lowerCAmelCase__ , encoded_inputs['''overflow_to_sample_mapping'''] )
SCREAMING_SNAKE_CASE : Any = images
return encoded_inputs
def __lowercase ( self : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int] ):
"""simple docstring"""
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
SCREAMING_SNAKE_CASE : Dict = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
F""" {len(lowerCAmelCase__ )} and {len(lowerCAmelCase__ )}""" )
return images_with_overflow
def __lowercase ( self : Any , *lowerCAmelCase__ : Union[str, Any] , **lowerCAmelCase__ : List[Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def __lowercase ( self : Optional[int] , *lowerCAmelCase__ : Optional[Any] , **lowerCAmelCase__ : Tuple ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def __lowercase ( self : Union[str, Any] ):
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def __lowercase ( self : Union[str, Any] ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowerCAmelCase__ , )
return self.image_processor_class
@property
def __lowercase ( self : List[Any] ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowerCAmelCase__ , )
return self.image_processor
| 464 |
'''simple docstring'''
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
lowerCAmelCase_ : Optional[Any] = '.'
if __name__ == "__main__":
lowerCAmelCase_ : Optional[Any] = os.path.join(REPO_PATH, 'utils/documentation_tests.txt')
lowerCAmelCase_ : Dict = []
lowerCAmelCase_ : Any = []
with open(doctest_file_path) as fp:
for line in fp:
lowerCAmelCase_ : List[str] = line.strip()
lowerCAmelCase_ : Optional[Any] = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
lowerCAmelCase_ : Dict = '\n'.join(non_existent_paths)
raise ValueError(f'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
| 464 | 1 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class __lowercase ( __snake_case ):
def _lowercase ( self : str ) -> str:
"""simple docstring"""
UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _lowercase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
with self.assertRaises(__lowerCamelCase ):
UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _lowercase ( self : List[str] ) -> Any:
"""simple docstring"""
with self.assertRaises(__lowerCamelCase ):
UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) )
def _lowercase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _lowercase ( self : Tuple ) -> str:
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) )
def _lowercase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _lowercase ( self : Tuple ) -> str:
"""simple docstring"""
UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
def _lowercase ( self : str ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def _lowercase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) )
def _lowercase ( self : List[str] ) -> Dict:
"""simple docstring"""
UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def _lowercase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _lowercase ( self : int ) -> Tuple:
"""simple docstring"""
import PIL.Image
UpperCAmelCase = PIL.Image.fromarray(np.arange(1_0 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"""datasets.arrow_writer.cast_to_python_objects""" , side_effect=__lowerCamelCase ) as mock_cast_to_python_objects:
UpperCAmelCase = pa.array(TypedSequence([{"""path""": None, """bytes""": B"""image_bytes"""}, pil_image] , type=Image() ) )
UpperCAmelCase , UpperCAmelCase = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("""optimize_list_casting""" , __lowerCamelCase )
self.assertFalse(kwargs["""optimize_list_casting"""] )
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->List[Any]:
UpperCAmelCase = pa.BufferReader(lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , pa.Buffer ) else pa.memory_map(lowerCAmelCase_ )
UpperCAmelCase = pa.ipc.open_stream(lowerCAmelCase_ )
UpperCAmelCase = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->int:
UpperCAmelCase = pa.BufferOutputStream()
UpperCAmelCase = pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_ , schema=lowerCAmelCase_ , writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
UpperCAmelCase , UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _UpperCamelCase ( ) ->Any:
UpperCAmelCase = pa.BufferOutputStream()
UpperCAmelCase = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
with ArrowWriter(stream=lowerCAmelCase_ , features=lowerCAmelCase_ ) as writer:
writer.write({"""labels""": 0} )
writer.write({"""labels""": 1} )
UpperCAmelCase , UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
UpperCAmelCase = pa.BufferReader(output.getvalue() )
UpperCAmelCase = pa.ipc.open_stream(lowerCAmelCase_ )
UpperCAmelCase = f.read_all()
UpperCAmelCase = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(lowerCAmelCase_ )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
def _UpperCamelCase ( lowerCAmelCase_ ) ->Union[str, Any]:
UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_ , writer_batch_size=lowerCAmelCase_ , hash_salt="""split_name""" , check_duplicates=lowerCAmelCase_ , ) as writer:
with pytest.raises(lowerCAmelCase_ ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
UpperCAmelCase , UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _UpperCamelCase ( lowerCAmelCase_ ) ->List[str]:
UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_ , writer_batch_size=lowerCAmelCase_ , hash_salt="""split_name""" , check_duplicates=lowerCAmelCase_ , ) as writer:
with pytest.raises(lowerCAmelCase_ ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1_0 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=1_0 )
UpperCAmelCase , UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _UpperCamelCase ( lowerCAmelCase_ ) ->Dict:
UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_ , writer_batch_size=lowerCAmelCase_ , hash_salt="""split_name""" , check_duplicates=lowerCAmelCase_ , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
UpperCAmelCase , UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->str:
UpperCAmelCase = pa.BufferOutputStream()
UpperCAmelCase = pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_ , schema=lowerCAmelCase_ , writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
UpperCAmelCase , UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->Dict:
UpperCAmelCase = pa.BufferOutputStream()
UpperCAmelCase = pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_ , schema=lowerCAmelCase_ , writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
UpperCAmelCase , UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->int:
UpperCAmelCase = pa.BufferOutputStream()
UpperCAmelCase = pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_ , schema=lowerCAmelCase_ , writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
UpperCAmelCase , UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _UpperCamelCase ( ) ->Union[str, Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
UpperCAmelCase = os.path.join(lowerCAmelCase_ , """test.arrow""" )
with ArrowWriter(path=lowerCAmelCase_ , schema=pa.schema(lowerCAmelCase_ ) ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
UpperCAmelCase , UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(lowerCAmelCase_ , metadata=writer._schema.metadata )
_check_output(lowerCAmelCase_ , 1 )
def _UpperCamelCase ( lowerCAmelCase_ ) ->str:
if pa.types.is_list(lowerCAmelCase_ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->int:
if isinstance(lst[0] , lowerCAmelCase_ ):
change_first_primitive_element_in_list(lst[0] , lowerCAmelCase_ )
else:
UpperCAmelCase = value
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ->List[str]:
UpperCAmelCase = pa.array(TypedSequence(lowerCAmelCase_ , optimized_int_type=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" , [
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ->str:
# in range
UpperCAmelCase = pa.array(OptimizedTypedSequence(lowerCAmelCase_ , col=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
UpperCAmelCase = copy.deepcopy(lowerCAmelCase_ )
UpperCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase = pa.array(OptimizedTypedSequence(lowerCAmelCase_ , col=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->Optional[Any]:
UpperCAmelCase = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=lowerCAmelCase_ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _UpperCamelCase ( lowerCAmelCase_ ) ->Dict:
UpperCAmelCase = """mock://dataset-train.arrow"""
with ArrowWriter(path=lowerCAmelCase_ , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(lowerCAmelCase_ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
UpperCAmelCase , UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(lowerCAmelCase_ )
def _UpperCamelCase ( ) ->List[str]:
UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(stream=lowerCAmelCase_ ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
UpperCAmelCase , UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
UpperCAmelCase = pa.BufferReader(output.getvalue() )
UpperCAmelCase = pq.read_table(lowerCAmelCase_ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("""embed_local_files""" , [False, True] )
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->Any:
import PIL.Image
UpperCAmelCase = str(tmp_path / """test_image_rgb.jpg""" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(lowerCAmelCase_ , format="""png""" )
UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(
stream=lowerCAmelCase_ , features=Features({"""image""": Image()} ) , embed_local_files=lowerCAmelCase_ ) as writer:
writer.write({"""image""": image_path} )
writer.finalize()
UpperCAmelCase = pa.BufferReader(output.getvalue() )
UpperCAmelCase = pq.read_table(lowerCAmelCase_ )
UpperCAmelCase = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["""image"""][0]["""path"""] , lowerCAmelCase_ )
with open(lowerCAmelCase_ , """rb""" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def _UpperCamelCase ( ) ->Dict:
UpperCAmelCase = pa.schema([pa.field("""col_1""" , pa.string() , nullable=lowerCAmelCase_ )] )
UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(stream=lowerCAmelCase_ ) as writer:
writer._build_writer(inferred_schema=lowerCAmelCase_ )
assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
| 377 |
def _UpperCamelCase ( lowerCAmelCase_ ) ->int:
if n == 1 or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return 0
elif n == 2:
return 1
else:
UpperCAmelCase = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def _UpperCamelCase ( lowerCAmelCase_ ) ->int:
UpperCAmelCase = 0
UpperCAmelCase = 2
while digits < n:
index += 1
UpperCAmelCase = len(str(fibonacci(lowerCAmelCase_ ) ) )
return index
def _UpperCamelCase ( lowerCAmelCase_ = 1_0_0_0 ) ->int:
return fibonacci_digits_index(lowerCAmelCase_ )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 377 | 1 |
"""simple docstring"""
import pprint
import requests
lowerCAmelCase__ = 'https://zenquotes.io/api'
def _lowerCamelCase ( ):
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def _lowerCamelCase ( ):
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
lowerCAmelCase__ = random_quotes()
pprint.pprint(response) | 711 |
"""simple docstring"""
def _lowerCamelCase ( __a ):
if not isinstance(__a, __a ):
SCREAMING_SNAKE_CASE_ = F'Input value of [number={number}] must be an integer'
raise TypeError(__a )
if number < 1:
SCREAMING_SNAKE_CASE_ = F'Input value of [number={number}] must be > 0'
raise ValueError(__a )
SCREAMING_SNAKE_CASE_ = 1
for i in range(1, __a ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod() | 628 | 0 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
SCREAMING_SNAKE_CASE__ : List[str] = yaml.safe_load(
"""\
name: \"\"
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Dataset Card for X\" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Table of Contents\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Dataset Description\"
allow_empty: false
allow_empty_text: false
subsections:
- name: \"Dataset Summary\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Supported Tasks and Leaderboards\"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
"""
)
SCREAMING_SNAKE_CASE__ : Dict = {
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
SCREAMING_SNAKE_CASE__ : str = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__ : Optional[int] = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__ : List[Any] = {
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Extra Ignored Subsection""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
}
],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
SCREAMING_SNAKE_CASE__ : List[str] = """\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__ : List[str] = (
"""The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."""
)
SCREAMING_SNAKE_CASE__ : Dict = """\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__ : List[str] = (
"""The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."""
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__ : Optional[int] = """The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."""
SCREAMING_SNAKE_CASE__ : Tuple = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."""
SCREAMING_SNAKE_CASE__ : List[Any] = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
"""
SCREAMING_SNAKE_CASE__ : str = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."""
SCREAMING_SNAKE_CASE__ : Dict = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__ : Tuple = """The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."""
SCREAMING_SNAKE_CASE__ : Optional[int] = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."""
SCREAMING_SNAKE_CASE__ : Optional[int] = """\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."""
SCREAMING_SNAKE_CASE__ : List[Any] = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."""
SCREAMING_SNAKE_CASE__ : Optional[int] = """\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__ : List[str] = """The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."""
SCREAMING_SNAKE_CASE__ : Tuple = """"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."""
SCREAMING_SNAKE_CASE__ : Tuple = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__ : Optional[int] = """The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."""
@pytest.mark.parametrize(
"""readme_md, expected_dict""" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[str]:
'''simple docstring'''
assert ReadMe.from_string(__lowerCamelCase , __lowerCamelCase ).to_dict() == expected_dict
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase ) -> Tuple:
'''simple docstring'''
with pytest.raises(__lowerCamelCase , match=re.escape(expected_error.format(path="""root""" ) ) ):
UpperCAmelCase__ : str = ReadMe.from_string(__lowerCamelCase , __lowerCamelCase )
readme.validate()
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
with pytest.raises(__lowerCamelCase , match=re.escape(expected_error.format(path="""root""" ) ) ):
ReadMe.from_string(__lowerCamelCase , __lowerCamelCase )
@pytest.mark.parametrize(
"""readme_md,""" , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def _lowerCamelCase ( __lowerCamelCase ) -> Any:
'''simple docstring'''
ReadMe.from_string(__lowerCamelCase , __lowerCamelCase , suppress_parsing_errors=__lowerCamelCase )
@pytest.mark.parametrize(
"""readme_md, expected_dict""" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ : List[Any] = Path(__lowerCamelCase ) / """README.md"""
with open(__lowerCamelCase , """w+""" ) as readme_file:
readme_file.write(__lowerCamelCase )
UpperCAmelCase__ : str = ReadMe.from_readme(__lowerCamelCase , __lowerCamelCase ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ : str = Path(__lowerCamelCase ) / """README.md"""
with open(__lowerCamelCase , """w+""" ) as readme_file:
readme_file.write(__lowerCamelCase )
UpperCAmelCase__ : int = expected_error.format(path=__lowerCamelCase )
with pytest.raises(__lowerCamelCase , match=re.escape(__lowerCamelCase ) ):
UpperCAmelCase__ : str = ReadMe.from_readme(__lowerCamelCase , __lowerCamelCase )
readme.validate()
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ : int = Path(__lowerCamelCase ) / """README.md"""
with open(__lowerCamelCase , """w+""" ) as readme_file:
readme_file.write(__lowerCamelCase )
UpperCAmelCase__ : List[str] = expected_error.format(path=__lowerCamelCase )
with pytest.raises(__lowerCamelCase , match=re.escape(__lowerCamelCase ) ):
ReadMe.from_readme(__lowerCamelCase , __lowerCamelCase )
@pytest.mark.parametrize(
"""readme_md,""" , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def _lowerCamelCase ( __lowerCamelCase ) -> Tuple:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ : List[str] = Path(__lowerCamelCase ) / """README.md"""
with open(__lowerCamelCase , """w+""" ) as readme_file:
readme_file.write(__lowerCamelCase )
ReadMe.from_readme(__lowerCamelCase , __lowerCamelCase , suppress_parsing_errors=__lowerCamelCase )
| 79 |
'''simple docstring'''
def UpperCAmelCase ( UpperCAmelCase__ : int):
if number < 0:
raise ValueError('number must not be negative')
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 320 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
class __A :
'''simple docstring'''
def __init__( self : List[Any] , UpperCAmelCase_ : list[str] ) ->List[Any]:
"""simple docstring"""
snake_case_ = []
self.adlist.append(
{"""value""": """""", """next_states""": [], """fail_state""": 0, """output""": []} )
for keyword in keywords:
self.add_keyword(UpperCAmelCase_ )
self.set_fail_transitions()
def lowerCAmelCase ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str ) ->int | None:
"""simple docstring"""
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def lowerCAmelCase ( self : int , UpperCAmelCase_ : str ) ->None:
"""simple docstring"""
snake_case_ = 0
for character in keyword:
snake_case_ = self.find_next_state(UpperCAmelCase_ , UpperCAmelCase_ )
if next_state is None:
self.adlist.append(
{
"""value""": character,
"""next_states""": [],
"""fail_state""": 0,
"""output""": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
snake_case_ = len(self.adlist ) - 1
else:
snake_case_ = next_state
self.adlist[current_state]["output"].append(UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->None:
"""simple docstring"""
snake_case_ = deque()
for node in self.adlist[0]["next_states"]:
q.append(UpperCAmelCase_ )
snake_case_ = 0
while q:
snake_case_ = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(UpperCAmelCase_ )
snake_case_ = self.adlist[r]["""fail_state"""]
while (
self.find_next_state(UpperCAmelCase_ , self.adlist[child]["""value"""] ) is None
and state != 0
):
snake_case_ = self.adlist[state]["""fail_state"""]
snake_case_ = self.find_next_state(
UpperCAmelCase_ , self.adlist[child]["""value"""] )
if self.adlist[child]["fail_state"] is None:
snake_case_ = 0
snake_case_ = (
self.adlist[child]["""output"""]
+ self.adlist[self.adlist[child]["""fail_state"""]]["""output"""]
)
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : str ) ->dict[str, list[int]]:
"""simple docstring"""
snake_case_ = {} # returns a dict with keywords and list of its occurrences
snake_case_ = 0
for i in range(len(UpperCAmelCase_ ) ):
while (
self.find_next_state(UpperCAmelCase_ , string[i] ) is None
and current_state != 0
):
snake_case_ = self.adlist[current_state]["""fail_state"""]
snake_case_ = self.find_next_state(UpperCAmelCase_ , string[i] )
if next_state is None:
snake_case_ = 0
else:
snake_case_ = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
snake_case_ = []
result[key].append(i - len(UpperCAmelCase_ ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=snake_case__)
class __A (snake_case__):
'''simple docstring'''
__lowercase: str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
__lowercase: ClassVar[Features] = Features({"""audio""": Audio()})
__lowercase: ClassVar[Features] = Features({"""transcription""": Value("""string""")})
__lowercase: str = "audio"
__lowercase: str = "transcription"
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Any ) ->int:
"""simple docstring"""
if self.audio_column not in features:
raise ValueError(F"""Column {self.audio_column} is not present in features.""" )
if not isinstance(features[self.audio_column] , UpperCAmelCase_ ):
raise ValueError(F"""Column {self.audio_column} is not an Audio type.""" )
snake_case_ = copy.deepcopy(self )
snake_case_ = self.input_schema.copy()
snake_case_ = features[self.audio_column]
snake_case_ = input_schema
return task_template
@property
def lowerCAmelCase ( self : List[str] ) ->Dict[str, str]:
"""simple docstring"""
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 2 | 1 |
from scipy.stats import spearmanr
import datasets
A_: Any = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
A_: Optional[int] = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n'
A_: List[Any] = R'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
"""simple docstring"""
def _UpperCAmelCase ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , )
def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ):
'''simple docstring'''
_lowercase = spearmanr(UpperCAmelCase , UpperCAmelCase )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 398 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
UpperCamelCase = tf.convert_to_tensor(
[[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
UpperCamelCase = model(__magic_name__ )["""last_hidden_state"""]
UpperCamelCase = tf.TensorShape((1, 1_0, 7_6_8) )
self.assertEqual(output.shape , __magic_name__ )
# compare the actual values for a slice.
UpperCamelCase = tf.convert_to_tensor(
[[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 386 | 0 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=14 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=0.02 , ):
__lowerCAmelCase : Any = parent
__lowerCAmelCase : Tuple = batch_size
__lowerCAmelCase : Optional[int] = seq_length
__lowerCAmelCase : int = is_training
__lowerCAmelCase : Dict = use_input_mask
__lowerCAmelCase : int = use_token_type_ids
__lowerCAmelCase : List[str] = use_labels
__lowerCAmelCase : Optional[Any] = vocab_size
__lowerCAmelCase : Tuple = hidden_size
__lowerCAmelCase : Tuple = rotary_dim
__lowerCAmelCase : Any = num_hidden_layers
__lowerCAmelCase : List[Any] = num_attention_heads
__lowerCAmelCase : Optional[Any] = intermediate_size
__lowerCAmelCase : Optional[Any] = hidden_act
__lowerCAmelCase : List[str] = hidden_dropout_prob
__lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
__lowerCAmelCase : Dict = max_position_embeddings
__lowerCAmelCase : Dict = initializer_range
__lowerCAmelCase : int = None
__lowerCAmelCase : Optional[Any] = vocab_size - 1
__lowerCAmelCase : int = vocab_size - 1
__lowerCAmelCase : Tuple = vocab_size - 1
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : List[Any] = None
if self.use_input_mask:
__lowerCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : Dict = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[int] = config_and_inputs
__lowerCAmelCase : List[str] = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = 20
__lowerCAmelCase : int = model_class_name(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__lowerCAmelCase : Optional[int] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCAmelCase : List[Any] = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Dict = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCAmelCase : List[Any] = model(
input_ids[:, -1:] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = 20
__lowerCAmelCase : str = model_class_name(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__lowerCAmelCase : Optional[int] = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCAmelCase : Tuple = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Dict = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCAmelCase : str = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
@require_flax
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : Union[str, Any] = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
A_ : List[str] = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = FlaxGPTJModelTester(self )
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@tooslow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__lowerCAmelCase : Dict = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCAmelCase : str = False
__lowerCAmelCase : List[str] = model.config.eos_token_id
__lowerCAmelCase : List[Any] = jax.jit(model.generate )
__lowerCAmelCase : Tuple = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__lowerCAmelCase : List[Any] = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCAmelCase : Optional[int] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCAmelCase : str = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase : Union[str, Any] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : int = pt_inputs['input_ids'].shape
__lowerCAmelCase : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = 0
__lowerCAmelCase : Any = 1
__lowerCAmelCase : List[str] = 0
__lowerCAmelCase : Optional[int] = 1
__lowerCAmelCase : Optional[Any] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
__lowerCAmelCase : Tuple = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
__lowerCAmelCase : Union[str, Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = fx_state
with torch.no_grad():
__lowerCAmelCase : Tuple = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
__lowerCAmelCase : List[Any] = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = fx_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCAmelCase : Dict = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCAmelCase : int = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase : Any = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
__lowerCAmelCase : Optional[int] = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
__lowerCAmelCase : List[str] = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = pt_inputs['input_ids'].shape
__lowerCAmelCase : Optional[Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = 0
__lowerCAmelCase : Optional[Any] = 1
__lowerCAmelCase : Union[str, Any] = 0
__lowerCAmelCase : Optional[int] = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__lowerCAmelCase : Dict = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
__lowerCAmelCase : int = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = pt_model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE )
with torch.no_grad():
__lowerCAmelCase : Optional[Any] = pt_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase : List[str] = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCAmelCase : str = model(np.ones((1, 1) ) )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) | 549 |
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[str] = len(_UpperCamelCase )
__lowerCAmelCase : Tuple = [[0] * n for i in range(_UpperCamelCase )]
for i in range(_UpperCamelCase ):
__lowerCAmelCase : Any = y_points[i]
for i in range(2 , _UpperCamelCase ):
for j in range(_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : int = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod() | 549 | 1 |
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
| 47 |
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
_UpperCAmelCase = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
_UpperCAmelCase = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
_UpperCAmelCase = f'{src_lang}-{tgt_lang}'
_UpperCAmelCase = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , '''README.md''' )
print(f'Generating {path}' )
with open(_SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
# make sure we are under the root of the project
__A : int = Path(__file__).resolve().parent.parent.parent
__A : List[Any] = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__A , __A , __A : List[Any] = model_name.split("-")
__A : Optional[Any] = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 602 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def lowerCamelCase__ ( A : Dict , A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = list(_lowerCamelCase )
UpperCAmelCase = list(_lowerCamelCase )
UpperCAmelCase = 0
for i in range(len(_lowerCamelCase ) ):
if lista[i] != lista[i]:
count += 1
UpperCAmelCase = "_"
if count > 1:
return False
else:
return "".join(_lowerCamelCase )
def lowerCamelCase__ ( A : str ):
'''simple docstring'''
UpperCAmelCase = []
while True:
UpperCAmelCase = ["$"] * len(_lowerCamelCase )
UpperCAmelCase = []
for i in range(len(_lowerCamelCase ) ):
for j in range(i + 1 , len(_lowerCamelCase ) ):
UpperCAmelCase = compare_string(binary[i] , binary[j] )
if k is False:
UpperCAmelCase = "*"
UpperCAmelCase = "*"
temp.append('''X''' )
for i in range(len(_lowerCamelCase ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(_lowerCamelCase ) == 0:
return pi
UpperCAmelCase = list(set(_lowerCamelCase ) )
def lowerCamelCase__ ( A : Any , A : Any ):
'''simple docstring'''
UpperCAmelCase = []
for minterm in minterms:
UpperCAmelCase = ""
for _ in range(_lowerCamelCase ):
UpperCAmelCase = str(minterm % 2 ) + string
minterm //= 2
temp.append(_lowerCamelCase )
return temp
def lowerCamelCase__ ( A : List[Any] , A : Optional[int] , A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = list(_lowerCamelCase )
UpperCAmelCase = list(_lowerCamelCase )
UpperCAmelCase = 0
for i in range(len(_lowerCamelCase ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def lowerCamelCase__ ( A : Dict , A : List[Any] ):
'''simple docstring'''
UpperCAmelCase = []
UpperCAmelCase = [0] * len(_lowerCamelCase )
for i in range(len(chart[0] ) ):
UpperCAmelCase = 0
UpperCAmelCase = -1
for j in range(len(_lowerCamelCase ) ):
if chart[j][i] == 1:
count += 1
UpperCAmelCase = j
if count == 1:
UpperCAmelCase = 1
for i in range(len(_lowerCamelCase ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(_lowerCamelCase ) ):
UpperCAmelCase = 0
temp.append(prime_implicants[i] )
while True:
UpperCAmelCase = 0
UpperCAmelCase = -1
UpperCAmelCase = 0
for i in range(len(_lowerCamelCase ) ):
UpperCAmelCase = chart[i].count(1 )
if count_n > max_n:
UpperCAmelCase = count_n
UpperCAmelCase = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(_lowerCamelCase ) ):
UpperCAmelCase = 0
def lowerCamelCase__ ( A : Optional[Any] , A : Tuple ):
'''simple docstring'''
UpperCAmelCase = [[0 for x in range(len(_lowerCamelCase ) )] for x in range(len(_lowerCamelCase ) )]
for i in range(len(_lowerCamelCase ) ):
UpperCAmelCase = prime_implicants[i].count('''_''' )
for j in range(len(_lowerCamelCase ) ):
if is_for_table(prime_implicants[i] , binary[j] , _lowerCamelCase ):
UpperCAmelCase = 1
return chart
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = int(input('''Enter the no. of variables\n''' ) )
UpperCAmelCase = [
float(_lowerCamelCase )
for x in input(
'''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split()
]
UpperCAmelCase = decimal_to_binary(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase = check(_lowerCamelCase )
print('''Prime Implicants are:''' )
print(_lowerCamelCase )
UpperCAmelCase = prime_implicant_chart(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase = selection(_lowerCamelCase , _lowerCamelCase )
print('''Essential Prime Implicants are:''' )
print(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 717 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCamelCase__( metaclass=lowerCAmelCase ):
__magic_name__ : List[str] = ["note_seq"]
def __init__( self : Any , *lowerCAmelCase : List[str] , **lowerCAmelCase : int )-> Optional[int]:
"""simple docstring"""
requires_backends(self , ['''note_seq'''] )
@classmethod
def a__( cls : Dict , *lowerCAmelCase : int , **lowerCAmelCase : Optional[int] )-> Dict:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
@classmethod
def a__( cls : int , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
| 50 | 0 |
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = [
"""Audio""",
"""Array2D""",
"""Array3D""",
"""Array4D""",
"""Array5D""",
"""ClassLabel""",
"""Features""",
"""Sequence""",
"""Value""",
"""Image""",
"""Translation""",
"""TranslationVariableLanguages""",
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 582 |
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
_UpperCAmelCase : List[Any] = {
"""n_samples""": 64,
"""horizon""": 32,
"""num_inference_steps""": 20,
"""n_guide_steps""": 2, # can set to 0 for faster sampling, does not use value network
"""scale_grad_by_std""": True,
"""scale""": 0.1,
"""eta""": 0.0,
"""t_grad_cutoff""": 2,
"""device""": """cpu""",
}
if __name__ == "__main__":
_UpperCAmelCase : List[str] = """hopper-medium-v2"""
_UpperCAmelCase : Any = gym.make(env_name)
_UpperCAmelCase : Tuple = ValueGuidedRLPipeline.from_pretrained(
"""bglick13/hopper-medium-v2-value-function-hor32""",
env=env,
)
env.seed(0)
_UpperCAmelCase : Tuple = env.reset()
_UpperCAmelCase : str = 0
_UpperCAmelCase : Union[str, Any] = 0
_UpperCAmelCase : List[str] = 1000
_UpperCAmelCase : Optional[int] = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
_UpperCAmelCase : int = pipeline(obs, planning_horizon=32)
# execute action in environment
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = env.step(denorm_actions)
_UpperCAmelCase : Tuple = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
F''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
_UpperCAmelCase : Dict = next_observation
except KeyboardInterrupt:
pass
print(F'''Total reward: {total_reward}''')
| 362 | 0 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def UpperCamelCase ( lowercase_ : np.ndarray ) -> tuple[np.ndarray, np.ndarray]:
'''simple docstring'''
lowercase , lowercase =np.shape(lowercase_ )
if rows != columns:
lowercase =(
'''\'table\' has to be of square shaped array but got a '''
f'{rows}x{columns} array:\n{table}'
)
raise ValueError(lowercase_ )
lowercase =np.zeros((rows, columns) )
lowercase =np.zeros((rows, columns) )
for i in range(lowercase_ ):
for j in range(lowercase_ ):
lowercase =sum(lower[i][k] * upper[k][j] for k in range(lowercase_ ) )
if upper[j][j] == 0:
raise ArithmeticError('''No LU decomposition exists''' )
lowercase =(table[i][j] - total) / upper[j][j]
lowercase =1
for j in range(lowercase_ , lowercase_ ):
lowercase =sum(lower[i][k] * upper[k][j] for k in range(lowercase_ ) )
lowercase =table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class __magic_name__ :
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_12 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ):
lowercase =parent
lowercase =13
lowercase =7
lowercase =True
lowercase =True
lowercase =True
lowercase =True
lowercase =99
lowercase =3_84
lowercase =2
lowercase =4
lowercase =37
lowercase ='''gelu'''
lowercase =0.1
lowercase =0.1
lowercase =5_12
lowercase =16
lowercase =2
lowercase =0.02
lowercase =3
lowercase =4
lowercase =1_28
lowercase =2
lowercase =9
lowercase =1
lowercase =None
def _A( self ):
lowercase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase =None
if self.use_input_mask:
lowercase =random_attention_mask([self.batch_size, self.seq_length] )
lowercase =None
if self.use_token_type_ids:
lowercase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase =None
lowercase =None
lowercase =None
if self.use_labels:
lowercase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase =ids_tensor([self.batch_size] , self.num_choices )
lowercase =ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=snake_case_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
lowercase =TFConvBertModel(config=snake_case_ )
lowercase ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase =[input_ids, input_mask]
lowercase =model(snake_case_ )
lowercase =model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
lowercase =TFConvBertForMaskedLM(config=snake_case_ )
lowercase ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase =model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
lowercase =self.num_labels
lowercase =TFConvBertForSequenceClassification(config=snake_case_ )
lowercase ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase =model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
lowercase =self.num_choices
lowercase =TFConvBertForMultipleChoice(config=snake_case_ )
lowercase =tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
lowercase =tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
lowercase =tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
lowercase ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase =model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
lowercase =self.num_labels
lowercase =TFConvBertForTokenClassification(config=snake_case_ )
lowercase ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase =model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
lowercase =TFConvBertForQuestionAnswering(config=snake_case_ )
lowercase ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase =model(snake_case_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A( self ):
lowercase =self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) =config_and_inputs
lowercase ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __magic_name__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
UpperCamelCase__ = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase__ = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def _A( self ):
lowercase =TFConvBertModelTester(self )
lowercase =ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def _A( self ):
self.config_tester.run_common_tests()
def _A( self ):
lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def _A( self ):
lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def _A( self ):
lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case_ )
def _A( self ):
lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
def _A( self ):
lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def _A( self ):
lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
@slow
def _A( self ):
lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common()
lowercase =True
lowercase =True
if hasattr(snake_case_ , '''use_cache''' ):
lowercase =True
lowercase =getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
lowercase =getattr(self.model_tester , '''key_length''' , snake_case_ )
for model_class in self.all_model_classes:
lowercase =self._prepare_for_class(snake_case_ , snake_case_ )
lowercase =model_class(snake_case_ )
lowercase =len(model(snake_case_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ , saved_model=snake_case_ )
lowercase =os.path.join(snake_case_ , '''saved_model''' , '''1''' )
lowercase =tf.keras.models.load_model(snake_case_ )
lowercase =model(snake_case_ )
if self.is_encoder_decoder:
lowercase =outputs['''encoder_hidden_states''']
lowercase =outputs['''encoder_attentions''']
else:
lowercase =outputs['''hidden_states''']
lowercase =outputs['''attentions''']
self.assertEqual(len(snake_case_ ) , snake_case_ )
lowercase =getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(snake_case_ ) , snake_case_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def _A( self ):
lowercase =TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
self.assertIsNotNone(snake_case_ )
def _A( self ):
lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common()
lowercase =True
lowercase =getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length )
lowercase =getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
lowercase =getattr(self.model_tester , '''key_length''' , snake_case_ )
lowercase =getattr(self.model_tester , '''key_length''' , snake_case_ )
def check_decoder_attentions_output(snake_case_ ):
lowercase =len(snake_case_ )
self.assertEqual(out_len % 2 , 0 )
lowercase =outputs.decoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(snake_case_ ):
lowercase =[
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
lowercase =True
lowercase =False
lowercase =model_class(snake_case_ )
lowercase =model(self._prepare_for_class(snake_case_ , snake_case_ ) )
lowercase =len(snake_case_ )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
if self.is_encoder_decoder:
lowercase =model_class(snake_case_ )
lowercase =model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_decoder_attentions_output(snake_case_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
lowercase =True
lowercase =model_class(snake_case_ )
lowercase =model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
# Check attention is always last and order is fine
lowercase =True
lowercase =True
lowercase =model_class(snake_case_ )
lowercase =model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(snake_case_ ) )
self.assertEqual(model.config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
@require_tf
class __magic_name__ ( unittest.TestCase ):
@slow
def _A( self ):
lowercase =TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
lowercase =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase =model(snake_case_ )[0]
lowercase =[1, 6, 7_68]
self.assertEqual(output.shape , snake_case_ )
lowercase =tf.constant(
[
[
[-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
[0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
[0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-4 )
| 145 | 0 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = SwinConfig.from_pretrained(
"""microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
_SCREAMING_SNAKE_CASE : Optional[Any] = MaskFormerConfig(backbone_config=SCREAMING_SNAKE_CASE__ )
_SCREAMING_SNAKE_CASE : Optional[int] = """huggingface/label-files"""
if "ade20k-full" in model_name:
# this should be ok
_SCREAMING_SNAKE_CASE : int = 847
_SCREAMING_SNAKE_CASE : Optional[int] = """maskformer-ade20k-full-id2label.json"""
elif "ade" in model_name:
# this should be ok
_SCREAMING_SNAKE_CASE : int = 150
_SCREAMING_SNAKE_CASE : Tuple = """ade20k-id2label.json"""
elif "coco-stuff" in model_name:
# this should be ok
_SCREAMING_SNAKE_CASE : Tuple = 171
_SCREAMING_SNAKE_CASE : str = """maskformer-coco-stuff-id2label.json"""
elif "coco" in model_name:
# TODO
_SCREAMING_SNAKE_CASE : str = 133
_SCREAMING_SNAKE_CASE : int = """coco-panoptic-id2label.json"""
elif "cityscapes" in model_name:
# this should be ok
_SCREAMING_SNAKE_CASE : Tuple = 19
_SCREAMING_SNAKE_CASE : Any = """cityscapes-id2label.json"""
elif "vistas" in model_name:
# this should be ok
_SCREAMING_SNAKE_CASE : Tuple = 65
_SCREAMING_SNAKE_CASE : List[str] = """mapillary-vistas-id2label.json"""
_SCREAMING_SNAKE_CASE : int = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" ) , """r""" ) )
_SCREAMING_SNAKE_CASE : Any = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
return config
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.layers.{i}.downsample.reduction.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", f"""mask_embedder.{i}.0.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", f"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = dct.pop(SCREAMING_SNAKE_CASE__ )
_SCREAMING_SNAKE_CASE : Dict = val
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_SCREAMING_SNAKE_CASE : int = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
_SCREAMING_SNAKE_CASE : Dict = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_SCREAMING_SNAKE_CASE : str = in_proj_weight[:dim, :]
_SCREAMING_SNAKE_CASE : Any = in_proj_bias[: dim]
_SCREAMING_SNAKE_CASE : Tuple = in_proj_weight[
dim : dim * 2, :
]
_SCREAMING_SNAKE_CASE : int = in_proj_bias[
dim : dim * 2
]
_SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[
-dim :, :
]
_SCREAMING_SNAKE_CASE : Any = in_proj_bias[-dim :]
# fmt: on
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_SCREAMING_SNAKE_CASE : Tuple = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
_SCREAMING_SNAKE_CASE : Tuple = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_SCREAMING_SNAKE_CASE : str = in_proj_weight[: hidden_size, :]
_SCREAMING_SNAKE_CASE : Dict = in_proj_bias[:config.hidden_size]
_SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[hidden_size : hidden_size * 2, :]
_SCREAMING_SNAKE_CASE : List[Any] = in_proj_bias[hidden_size : hidden_size * 2]
_SCREAMING_SNAKE_CASE : Any = in_proj_weight[-hidden_size :, :]
_SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_SCREAMING_SNAKE_CASE : Tuple = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
_SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_SCREAMING_SNAKE_CASE : Tuple = in_proj_weight[: hidden_size, :]
_SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[:config.hidden_size]
_SCREAMING_SNAKE_CASE : Optional[int] = in_proj_weight[hidden_size : hidden_size * 2, :]
_SCREAMING_SNAKE_CASE : Any = in_proj_bias[hidden_size : hidden_size * 2]
_SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[-hidden_size :, :]
_SCREAMING_SNAKE_CASE : str = in_proj_bias[-hidden_size :]
# fmt: on
def snake_case_ ( ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_SCREAMING_SNAKE_CASE : str = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return im
@torch.no_grad()
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = get_maskformer_config(SCREAMING_SNAKE_CASE__ )
# load original state_dict
with open(SCREAMING_SNAKE_CASE__ , """rb""" ) as f:
_SCREAMING_SNAKE_CASE : Dict = pickle.load(SCREAMING_SNAKE_CASE__ )
_SCREAMING_SNAKE_CASE : List[Any] = data["""model"""]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_SCREAMING_SNAKE_CASE : Tuple = create_rename_keys(SCREAMING_SNAKE_CASE__ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
read_in_swin_q_k_v(SCREAMING_SNAKE_CASE__ , config.backbone_config )
read_in_decoder_q_k_v(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# update to torch tensors
for key, value in state_dict.items():
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
# load 🤗 model
_SCREAMING_SNAKE_CASE : str = MaskFormerForInstanceSegmentation(SCREAMING_SNAKE_CASE__ )
model.eval()
for name, param in model.named_parameters():
print(SCREAMING_SNAKE_CASE__ , param.shape )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(SCREAMING_SNAKE_CASE__ ) == 0, f"""Unexpected keys: {unexpected_keys}"""
# verify results
_SCREAMING_SNAKE_CASE : str = prepare_img()
if "vistas" in model_name:
_SCREAMING_SNAKE_CASE : Union[str, Any] = 65
elif "cityscapes" in model_name:
_SCREAMING_SNAKE_CASE : Optional[Any] = 6_5535
else:
_SCREAMING_SNAKE_CASE : List[Any] = 255
_SCREAMING_SNAKE_CASE : List[str] = True if """ade""" in model_name else False
_SCREAMING_SNAKE_CASE : Any = MaskFormerImageProcessor(ignore_index=SCREAMING_SNAKE_CASE__ , reduce_labels=SCREAMING_SNAKE_CASE__ )
_SCREAMING_SNAKE_CASE : str = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
_SCREAMING_SNAKE_CASE : Optional[Any] = model(**SCREAMING_SNAKE_CASE__ )
print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_SCREAMING_SNAKE_CASE : str = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
print("""Pushing model and image processor to the hub...""" )
model.push_to_hub(f"""nielsr/{model_name}""" )
image_processor.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
help=('Name of the MaskFormer model you\'d like to convert',),
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase_ : List[Any] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 533 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
UpperCAmelCase_ : List[Any] = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : Dict = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : str = {
'facebook/mbart-large-en-ro': 1024,
'facebook/mbart-large-cc25': 1024,
}
# fmt: off
UpperCAmelCase_ : List[str] = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class lowercase__ ( _snake_case ):
'''simple docstring'''
A_ : Any = VOCAB_FILES_NAMES
A_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Any = PRETRAINED_VOCAB_FILES_MAP
A_ : Tuple = ["""input_ids""", """attention_mask"""]
A_ : Dict = MBartTokenizer
A_ : List[int] = []
A_ : List[int] = []
def __init__( self , __snake_case=None , __snake_case=None , __snake_case="<s>" , __snake_case="</s>" , __snake_case="</s>" , __snake_case="<s>" , __snake_case="<unk>" , __snake_case="<pad>" , __snake_case="<mask>" , __snake_case=None , __snake_case=None , __snake_case=None , **__snake_case , ):
# Mask token behave like a normal word, i.e. include the space before it
_SCREAMING_SNAKE_CASE : List[Any] = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
super().__init__(
vocab_file=__snake_case , tokenizer_file=__snake_case , bos_token=__snake_case , eos_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , src_lang=__snake_case , tgt_lang=__snake_case , additional_special_tokens=__snake_case , **__snake_case , )
_SCREAMING_SNAKE_CASE : List[str] = vocab_file
_SCREAMING_SNAKE_CASE : Optional[int] = False if not self.vocab_file else True
_SCREAMING_SNAKE_CASE : int = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
_SCREAMING_SNAKE_CASE : str = {
lang_code: self.convert_tokens_to_ids(__snake_case ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_SCREAMING_SNAKE_CASE : Optional[Any] = src_lang if src_lang is not None else """en_XX"""
_SCREAMING_SNAKE_CASE : str = self.convert_tokens_to_ids(self._src_lang )
_SCREAMING_SNAKE_CASE : Any = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCAmelCase_ ( self ):
return self._src_lang
@src_lang.setter
def UpperCAmelCase_ ( self , __snake_case ):
_SCREAMING_SNAKE_CASE : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCAmelCase_ ( self , __snake_case , __snake_case = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCAmelCase_ ( self , __snake_case , __snake_case = None ):
_SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
_SCREAMING_SNAKE_CASE : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase_ ( self , __snake_case , __snake_case , __snake_case , __snake_case , **__snake_case ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_SCREAMING_SNAKE_CASE : Dict = src_lang
_SCREAMING_SNAKE_CASE : List[Any] = self(__snake_case , add_special_tokens=__snake_case , return_tensors=__snake_case , **__snake_case )
_SCREAMING_SNAKE_CASE : Dict = self.convert_tokens_to_ids(__snake_case )
_SCREAMING_SNAKE_CASE : Union[str, Any] = tgt_lang_id
return inputs
def UpperCAmelCase_ ( self , __snake_case , __snake_case = "en_XX" , __snake_case = None , __snake_case = "ro_RO" , **__snake_case , ):
_SCREAMING_SNAKE_CASE : Optional[Any] = src_lang
_SCREAMING_SNAKE_CASE : Union[str, Any] = tgt_lang
return super().prepare_seqaseq_batch(__snake_case , __snake_case , **__snake_case )
def UpperCAmelCase_ ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCAmelCase_ ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCAmelCase_ ( self , __snake_case ):
_SCREAMING_SNAKE_CASE : List[str] = self.convert_tokens_to_ids(__snake_case )
_SCREAMING_SNAKE_CASE : str = []
_SCREAMING_SNAKE_CASE : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
_SCREAMING_SNAKE_CASE : int = self.convert_ids_to_tokens(self.prefix_tokens )
_SCREAMING_SNAKE_CASE : Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_SCREAMING_SNAKE_CASE : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCAmelCase_ ( self , __snake_case ):
_SCREAMING_SNAKE_CASE : List[str] = self.convert_tokens_to_ids(__snake_case )
_SCREAMING_SNAKE_CASE : List[str] = []
_SCREAMING_SNAKE_CASE : Tuple = [self.eos_token_id, self.cur_lang_code]
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_SCREAMING_SNAKE_CASE : str = self.convert_ids_to_tokens(self.suffix_tokens )
_SCREAMING_SNAKE_CASE : Union[str, Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCAmelCase_ ( self , __snake_case , __snake_case = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__snake_case ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
_SCREAMING_SNAKE_CASE : List[str] = os.path.join(
__snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
copyfile(self.vocab_file , __snake_case )
return (out_vocab_file,)
| 533 | 1 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
_lowercase = (3, 9, -11, 0, 7, 5, 1, -1)
_lowercase = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class lowerCamelCase__ :
__lowerCamelCase = 42
__lowerCamelCase = 42
class lowerCamelCase__ :
def __init__( self : int , __a : Iterable[int] ):
'''simple docstring'''
lowerCamelCase__: Node | None = None
for i in sorted(__a , reverse=__a ):
lowerCamelCase__: List[str] = Node(__a , self.head )
def __iter__( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__: Dict = self.head
while node:
yield node.data
lowerCamelCase__: Any = node.next_node
def __len__( self : List[Any] ):
'''simple docstring'''
return sum(1 for _ in self )
def __str__( self : Tuple ):
'''simple docstring'''
return " -> ".join([str(__a ) for node in self] )
def __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ) -> SortedLinkedList:
'''simple docstring'''
return SortedLinkedList(list(_UpperCamelCase ) + list(_UpperCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 242 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/config.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/config.json'
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class lowerCamelCase__ ( A__ ):
__lowerCamelCase = """fnet"""
def __init__( self : List[Any] , __a : Union[str, Any]=32000 , __a : Any=768 , __a : Tuple=12 , __a : Tuple=3072 , __a : List[Any]="gelu_new" , __a : str=0.1 , __a : Optional[int]=512 , __a : int=4 , __a : str=0.02 , __a : Dict=1e-12 , __a : Optional[Any]=False , __a : Any=512 , __a : Any=3 , __a : int=1 , __a : Optional[int]=2 , **__a : List[str] , ):
'''simple docstring'''
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
lowerCamelCase__: Tuple = vocab_size
lowerCamelCase__: str = max_position_embeddings
lowerCamelCase__: str = hidden_size
lowerCamelCase__: str = num_hidden_layers
lowerCamelCase__: Union[str, Any] = intermediate_size
lowerCamelCase__: List[str] = hidden_act
lowerCamelCase__: str = hidden_dropout_prob
lowerCamelCase__: Any = initializer_range
lowerCamelCase__: List[Any] = type_vocab_size
lowerCamelCase__: List[str] = layer_norm_eps
lowerCamelCase__: Dict = use_tpu_fourier_optimizations
lowerCamelCase__: Union[str, Any] = tpu_short_seq_length
| 242 | 1 |
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def SCREAMING_SNAKE_CASE_ ( snake_case : str )-> Any:
return 1 / (1 + np.exp(-z ))
def SCREAMING_SNAKE_CASE_ ( snake_case : Optional[Any] , snake_case : str )-> List[Any]:
return (-y * np.log(UpperCAmelCase_ ) - (1 - y) * np.log(1 - h )).mean()
def SCREAMING_SNAKE_CASE_ ( snake_case : str , snake_case : Union[str, Any] , snake_case : Any )-> Any:
_lowerCamelCase = np.dot(UpperCAmelCase_ , UpperCAmelCase_ )
return np.sum(y * scores - np.log(1 + np.exp(UpperCAmelCase_ ) ) )
def SCREAMING_SNAKE_CASE_ ( snake_case : Dict , snake_case : Tuple , snake_case : Any , snake_case : List[str]=70_000 )-> int:
_lowerCamelCase = np.zeros(x.shape[1] )
for iterations in range(UpperCAmelCase_ ):
_lowerCamelCase = np.dot(UpperCAmelCase_ , UpperCAmelCase_ )
_lowerCamelCase = sigmoid_function(UpperCAmelCase_ )
_lowerCamelCase = np.dot(x.T , h - y ) / y.size
_lowerCamelCase = theta - alpha * gradient # updating the weights
_lowerCamelCase = np.dot(UpperCAmelCase_ , UpperCAmelCase_ )
_lowerCamelCase = sigmoid_function(UpperCAmelCase_ )
_lowerCamelCase = cost_function(UpperCAmelCase_ , UpperCAmelCase_ )
if iterations % 100 == 0:
print(f'loss: {j} \t' ) # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
A_ : List[str] =datasets.load_iris()
A_ : Union[str, Any] =iris.data[:, :2]
A_ : Union[str, Any] =(iris.target != 0) * 1
A_ : int =0.1
A_ : Tuple =logistic_reg(alpha, x, y, max_iterations=7_0_0_0_0)
print("""theta: """, theta) # printing the theta i.e our weights vector
def SCREAMING_SNAKE_CASE_ ( snake_case : Optional[int] )-> Union[str, Any]:
return sigmoid_function(
np.dot(UpperCAmelCase_ , UpperCAmelCase_ ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(1_0, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="""b""", label="""0""")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="""r""", label="""1""")
(A_) : Tuple =(x[:, 0].min(), x[:, 0].max())
(A_) : int =(x[:, 1].min(), x[:, 1].max())
(A_) : int =np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
A_ : List[str] =np.c_[xxa.ravel(), xxa.ravel()]
A_ : Optional[Any] =predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors="""black""")
plt.legend()
plt.show()
| 650 |
import torch
from diffusers import DiffusionPipeline
class lowercase__ (__snake_case ):
"""simple docstring"""
def __init__( self : List[Any] , __a : Optional[Any] , __a : List[str] ):
super().__init__()
self.register_modules(unet=__a , scheduler=__a )
def __call__( self : Union[str, Any] ):
snake_case__ : int = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
snake_case__ : Dict = 1
snake_case__ : str = self.unet(__a , __a ).sample
snake_case__ : Tuple = self.scheduler.step(__a , __a , __a ).prev_sample
snake_case__ : Optional[int] = scheduler_output - scheduler_output + torch.ones_like(__a )
return result
| 648 | 0 |
from __future__ import annotations
lowerCamelCase__ = """Muhammad Umer Farooq"""
lowerCamelCase__ = """MIT"""
lowerCamelCase__ = """1.0.0"""
lowerCamelCase__ = """Muhammad Umer Farooq"""
lowerCamelCase__ = """contact@muhammadumerfarooq.me"""
lowerCamelCase__ = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class A__ ( __magic_name__ ):
def __init__( self : Optional[int] , a : str ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ : list[str] = []
lowerCAmelCase__ : Union[str, Any] = domain
def _lowerCamelCase ( self : int , a : str , a : list[tuple[str, str | None]] ):
'''simple docstring'''
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
lowerCAmelCase__ : int = parse.urljoin(self.domain , a )
self.urls.append(a )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str:
return ".".join(get_sub_domain_name(SCREAMING_SNAKE_CASE_ ).split('.' )[-2:] )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str:
return parse.urlparse(SCREAMING_SNAKE_CASE_ ).netloc
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "https://github.com" ) -> list[str]:
lowerCAmelCase__ : Optional[Any] = get_domain_name(SCREAMING_SNAKE_CASE_ )
# Initialize the parser
lowerCAmelCase__ : Optional[int] = Parser(SCREAMING_SNAKE_CASE_ )
try:
# Open URL
lowerCAmelCase__ : Union[str, Any] = requests.get(SCREAMING_SNAKE_CASE_ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
lowerCAmelCase__ : Optional[Any] = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
lowerCAmelCase__ : int = requests.get(SCREAMING_SNAKE_CASE_ )
# Get the valid email.
lowerCAmelCase__ : Union[str, Any] = re.findall('[a-zA-Z0-9]+@' + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(SCREAMING_SNAKE_CASE_ )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
lowerCamelCase__ = emails_from_url("""https://github.com""")
print(F"""{len(emails)} emails found:""")
print("""\n""".join(sorted(emails))) | 69 |
from numpy import exp, pi, sqrt
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 1.0 ) -> int:
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 69 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Dict , snake_case_ : Optional[Any] ) -> int:
'''simple docstring'''
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(snake_case_ , n - 1 , snake_case_ ) * a) % mod
else:
UpperCAmelCase_ = binary_exponentiation(snake_case_ , n / 2 , snake_case_ )
return (b * b) % mod
# a prime number
SCREAMING_SNAKE_CASE_: Optional[int] =7_01
SCREAMING_SNAKE_CASE_: Any =10_00_00_00_00
SCREAMING_SNAKE_CASE_: Optional[Any] =10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 78 |
"""simple docstring"""
from __future__ import annotations
from random import random
class A_ :
def __init__( self: List[str] ,__lowerCAmelCase: int | None = None ):
'''simple docstring'''
_lowerCamelCase : Any = value
_lowerCamelCase : Optional[int] = random()
_lowerCamelCase : Node | None = None
_lowerCamelCase : Node | None = None
def __repr__( self: Tuple ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} ,indent=1 )
def __str__( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Tuple = str(self.value ) + " "
_lowerCamelCase : Optional[Any] = str(self.left or "" )
_lowerCamelCase : int = str(self.right or "" )
return value + left + right
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> tuple[Node | None, Node | None]:
'''simple docstring'''
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
_lowerCamelCase, _lowerCamelCase : int = split(root.left , _lowerCamelCase )
return left, root
else:
_lowerCamelCase, _lowerCamelCase : Optional[int] = split(root.right , _lowerCamelCase )
return root, right
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
_lowerCamelCase : Any = merge(left.right , _lowerCamelCase )
return left
else:
_lowerCamelCase : Optional[Any] = merge(_lowerCamelCase , right.left )
return right
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
_lowerCamelCase : int = Node(_lowerCamelCase )
_lowerCamelCase, _lowerCamelCase : Tuple = split(_lowerCamelCase , _lowerCamelCase )
return merge(merge(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[Any] = split(_lowerCamelCase , value - 1 )
_lowerCamelCase, _lowerCamelCase : List[Any] = split(_lowerCamelCase , _lowerCamelCase )
return merge(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> None:
'''simple docstring'''
if not root: # None
return
else:
inorder(root.left )
print(root.value , end="," )
inorder(root.right )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
for arg in args.split():
if arg[0] == "+":
_lowerCamelCase : Optional[Any] = insert(_lowerCamelCase , int(arg[1:] ) )
elif arg[0] == "-":
_lowerCamelCase : Optional[Any] = erase(_lowerCamelCase , int(arg[1:] ) )
else:
print("Unknown command" )
return root
def lowerCamelCase_( ) -> None:
'''simple docstring'''
_lowerCamelCase : List[Any] = None
print(
"enter numbers to create a tree, + value to add value into treap, "
"- value to erase all nodes with value. 'q' to quit. " )
_lowerCamelCase : int = input()
while args != "q":
_lowerCamelCase : List[str] = interact_treap(_lowerCamelCase , _lowerCamelCase )
print(_lowerCamelCase )
_lowerCamelCase : Tuple = input()
print("good by!" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 46 | 0 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__magic_name__ = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
__magic_name__ = (
subprocess.check_output(F"""git diff --diff-filter=d --name-only {fork_point_sha}""".split()).decode('''utf-8''').split()
)
__magic_name__ = '''|'''.join(sys.argv[1:])
__magic_name__ = re.compile(rF"""^({joined_dirs}).*?\.py$""")
__magic_name__ = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
| 314 |
def __snake_case ( _UpperCAmelCase ):
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def __snake_case ( _UpperCAmelCase ):
"""simple docstring"""
lowercase = 0
lowercase = len(_UpperCAmelCase ) # No of vertices in graph
lowercase = [0] * n
lowercase = [False] * n
def dfs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase = True
lowercase = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , id_ )
lowercase = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowercase = min(low[at] , low[to] )
lowercase = []
for i in range(_UpperCAmelCase ):
if not visited[i]:
dfs(_UpperCAmelCase , -1 , _UpperCAmelCase , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 314 | 1 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
lowerCamelCase : str = random.Random()
def _lowerCAmelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : str=1.0 , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : Dict=None ) -> Dict:
"""simple docstring"""
if rng is None:
_SCREAMING_SNAKE_CASE =global_rng
_SCREAMING_SNAKE_CASE =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class A__ ( unittest.TestCase ):
def __init__( self : List[str] , _a : Tuple , _a : Optional[Any]=7 , _a : int=400 , _a : List[str]=2000 , _a : int=2048 , _a : List[str]=128 , _a : List[str]=1 , _a : int=512 , _a : Any=30 , _a : List[str]=4_4100 , ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =min_seq_length
_SCREAMING_SNAKE_CASE =max_seq_length
_SCREAMING_SNAKE_CASE =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_SCREAMING_SNAKE_CASE =spectrogram_length
_SCREAMING_SNAKE_CASE =feature_size
_SCREAMING_SNAKE_CASE =num_audio_channels
_SCREAMING_SNAKE_CASE =hop_length
_SCREAMING_SNAKE_CASE =chunk_length
_SCREAMING_SNAKE_CASE =sampling_rate
def A ( self : List[Any] ) -> List[str]:
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def A ( self : Any , _a : Optional[Any]=False , _a : List[Any]=False ) -> Tuple:
'''simple docstring'''
def _flatten(_a : List[str] ):
return list(itertools.chain(*snake_case__ ) )
if equal_length:
_SCREAMING_SNAKE_CASE =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_SCREAMING_SNAKE_CASE =[
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_SCREAMING_SNAKE_CASE =[np.asarray(snake_case__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class A__ ( lowerCamelCase_ , unittest.TestCase ):
A__ = TvltFeatureExtractor
def A ( self : Any ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =TvltFeatureExtractionTester(self )
def A ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(snake_case__ , 'spectrogram_length' ) )
self.assertTrue(hasattr(snake_case__ , 'feature_size' ) )
self.assertTrue(hasattr(snake_case__ , 'num_audio_channels' ) )
self.assertTrue(hasattr(snake_case__ , 'hop_length' ) )
self.assertTrue(hasattr(snake_case__ , 'chunk_length' ) )
self.assertTrue(hasattr(snake_case__ , 'sampling_rate' ) )
def A ( self : Optional[int] ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_SCREAMING_SNAKE_CASE =feat_extract_first.save_pretrained(snake_case__ )[0]
check_json_file_has_correct_format(snake_case__ )
_SCREAMING_SNAKE_CASE =self.feature_extraction_class.from_pretrained(snake_case__ )
_SCREAMING_SNAKE_CASE =feat_extract_first.to_dict()
_SCREAMING_SNAKE_CASE =feat_extract_second.to_dict()
_SCREAMING_SNAKE_CASE =dict_first.pop('mel_filters' )
_SCREAMING_SNAKE_CASE =dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(snake_case__ , snake_case__ ) )
self.assertEqual(snake_case__ , snake_case__ )
def A ( self : Optional[int] ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_SCREAMING_SNAKE_CASE =os.path.join(snake_case__ , 'feat_extract.json' )
feat_extract_first.to_json_file(snake_case__ )
_SCREAMING_SNAKE_CASE =self.feature_extraction_class.from_json_file(snake_case__ )
_SCREAMING_SNAKE_CASE =feat_extract_first.to_dict()
_SCREAMING_SNAKE_CASE =feat_extract_second.to_dict()
_SCREAMING_SNAKE_CASE =dict_first.pop('mel_filters' )
_SCREAMING_SNAKE_CASE =dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(snake_case__ , snake_case__ ) )
self.assertEqual(snake_case__ , snake_case__ )
def A ( self : List[str] ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_SCREAMING_SNAKE_CASE =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_SCREAMING_SNAKE_CASE =[np.asarray(snake_case__ ) for speech_input in speech_inputs]
# Test not batched input
_SCREAMING_SNAKE_CASE =feature_extractor(np_speech_inputs[0] , return_tensors='np' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_SCREAMING_SNAKE_CASE =feature_extractor(snake_case__ , return_tensors='np' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_SCREAMING_SNAKE_CASE =feature_extractor(
snake_case__ , return_tensors='np' , sampling_rate=4_4100 , mask_audio=snake_case__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_SCREAMING_SNAKE_CASE =[floats_list((1, x) )[0] for x in (800, 800, 800)]
_SCREAMING_SNAKE_CASE =np.asarray(snake_case__ )
_SCREAMING_SNAKE_CASE =feature_extractor(snake_case__ , return_tensors='np' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def A ( self : List[str] , _a : Dict ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
_SCREAMING_SNAKE_CASE =ds.sort('id' ).select(range(snake_case__ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def A ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self._load_datasamples(1 )
_SCREAMING_SNAKE_CASE =TvltFeatureExtractor()
_SCREAMING_SNAKE_CASE =feature_extractor(snake_case__ , return_tensors='pt' ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 192, 128) )
_SCREAMING_SNAKE_CASE =torch.tensor([[-0.30_32, -0.27_08], [-0.44_34, -0.40_07]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , snake_case__ , atol=1e-4 ) )
| 405 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : List[Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = FunnelConfig.from_json_file(lowerCamelCase_ )
print(F'Building PyTorch model from configuration: {config}' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = FunnelBaseModel(lowerCamelCase_ ) if base_model else FunnelModel(lowerCamelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , lowerCamelCase_ )
if __name__ == "__main__":
UpperCamelCase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
)
UpperCamelCase__ : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 105 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 665 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = """gpt_bigcode"""
snake_case_ = ["""past_key_values"""]
snake_case_ = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Any , __lowercase : Any=5_02_57 , __lowercase : int=10_24 , __lowercase : List[str]=7_68 , __lowercase : Optional[int]=12 , __lowercase : Dict=12 , __lowercase : List[str]=None , __lowercase : int="gelu_pytorch_tanh" , __lowercase : Union[str, Any]=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Optional[Any]=1e-5 , __lowercase : List[str]=0.02 , __lowercase : Tuple=True , __lowercase : Optional[Any]=True , __lowercase : Union[str, Any]=5_02_56 , __lowercase : List[Any]=5_02_56 , __lowercase : Union[str, Any]=True , __lowercase : List[str]=True , __lowercase : Dict=True , **__lowercase : List[Any] , ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =n_positions
SCREAMING_SNAKE_CASE__ : Dict =n_embd
SCREAMING_SNAKE_CASE__ : Dict =n_layer
SCREAMING_SNAKE_CASE__ : Union[str, Any] =n_head
SCREAMING_SNAKE_CASE__ : List[str] =n_inner
SCREAMING_SNAKE_CASE__ : List[str] =activation_function
SCREAMING_SNAKE_CASE__ : List[Any] =resid_pdrop
SCREAMING_SNAKE_CASE__ : List[Any] =embd_pdrop
SCREAMING_SNAKE_CASE__ : List[str] =attn_pdrop
SCREAMING_SNAKE_CASE__ : Dict =layer_norm_epsilon
SCREAMING_SNAKE_CASE__ : List[str] =initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] =scale_attn_weights
SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_cache
SCREAMING_SNAKE_CASE__ : Dict =attention_softmax_in_fpaa
SCREAMING_SNAKE_CASE__ : int =scale_attention_softmax_in_fpaa
SCREAMING_SNAKE_CASE__ : Dict =multi_query
SCREAMING_SNAKE_CASE__ : Optional[Any] =bos_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] =eos_token_id
super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) | 665 | 1 |
import math
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: int = 0 , lowerCAmelCase: int = 0 )-> list:
_snake_case : Tuple = end or len(SCREAMING_SNAKE_CASE__ )
for i in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
_snake_case : Optional[int] = i
_snake_case : Tuple = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
_snake_case : Dict = array[temp_index - 1]
temp_index -= 1
_snake_case : Dict = temp_index_value
return array
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: int , lowerCAmelCase: int )-> None: # Max Heap
_snake_case : List[Any] = index
_snake_case : List[str] = 2 * index + 1 # Left Node
_snake_case : str = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
_snake_case : Any = left_index
if right_index < heap_size and array[largest] < array[right_index]:
_snake_case : Optional[int] = right_index
if largest != index:
_snake_case : str = array[largest], array[index]
heapify(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def lowerCamelCase_ ( lowerCAmelCase: list )-> list:
_snake_case : Optional[Any] = len(SCREAMING_SNAKE_CASE__ )
for i in range(n // 2 , -1 , -1 ):
heapify(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for i in range(n - 1 , 0 , -1 ):
_snake_case : Union[str, Any] = array[0], array[i]
heapify(SCREAMING_SNAKE_CASE__ , 0 , SCREAMING_SNAKE_CASE__ )
return array
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: int )-> int:
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: int )-> int:
_snake_case : List[Any] = low
_snake_case : Tuple = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
_snake_case : Optional[Any] = array[j], array[i]
i += 1
def lowerCamelCase_ ( lowerCAmelCase: list )-> list:
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return array
_snake_case : List[Any] = 2 * math.ceil(math.loga(len(SCREAMING_SNAKE_CASE__ ) ) )
_snake_case : Optional[Any] = 16
return intro_sort(SCREAMING_SNAKE_CASE__ , 0 , len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: int )-> list:
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(SCREAMING_SNAKE_CASE__ )
max_depth -= 1
_snake_case : str = median_of_a(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , start + ((end - start) // 2) + 1 , end - 1 )
_snake_case : Any = partition(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
intro_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_snake_case : Union[str, Any] = p
return insertion_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase_ = input("""Enter numbers separated by a comma : """).strip()
lowerCAmelCase_ = [float(item) for item in user_input.split(""",""")]
print(sort(unsorted))
| 411 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self : Union[str, Any] , A : Optional[Any] ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
_UpperCAmelCase : List[str] = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(A )
def snake_case_ ( self : Optional[Any] ):
_UpperCAmelCase : Any = "sshleifer/tiny-gpt2"
_UpperCAmelCase : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase : List[Any] = PyTorchBenchmark(A )
_UpperCAmelCase : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case_ ( self : Dict ):
_UpperCAmelCase : List[str] = "sgugger/tiny-distilbert-classification"
_UpperCAmelCase : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , only_pretrain_model=A , )
_UpperCAmelCase : Dict = PyTorchBenchmark(A )
_UpperCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case_ ( self : int ):
_UpperCAmelCase : Any = "sshleifer/tiny-gpt2"
_UpperCAmelCase : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , torchscript=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase : Dict = PyTorchBenchmark(A )
_UpperCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : Union[str, Any] = "sshleifer/tiny-gpt2"
_UpperCAmelCase : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , fpaa=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase : List[str] = PyTorchBenchmark(A )
_UpperCAmelCase : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case_ ( self : List[Any] ):
_UpperCAmelCase : Optional[Any] = "sshleifer/tiny-gpt2"
_UpperCAmelCase : int = AutoConfig.from_pretrained(A )
# set architectures equal to `None`
_UpperCAmelCase : Optional[Any] = None
_UpperCAmelCase : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase : List[Any] = PyTorchBenchmark(A , configs=[config] )
_UpperCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case_ ( self : int ):
_UpperCAmelCase : Dict = "sshleifer/tiny-gpt2"
_UpperCAmelCase : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase : Tuple = PyTorchBenchmark(A )
_UpperCAmelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision" )
def snake_case_ ( self : List[Any] ):
_UpperCAmelCase : Optional[int] = "sshleifer/tiny-gpt2"
_UpperCAmelCase : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , fpaa=A , multi_process=A , )
_UpperCAmelCase : Tuple = PyTorchBenchmark(A )
_UpperCAmelCase : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case_ ( self : List[Any] ):
_UpperCAmelCase : Optional[Any] = "sshleifer/tiny-gpt2"
_UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(A )
_UpperCAmelCase : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase : List[Any] = PyTorchBenchmark(A , configs=[config] )
_UpperCAmelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case_ ( self : str ):
_UpperCAmelCase : List[str] = "sshleifer/tinier_bart"
_UpperCAmelCase : Any = AutoConfig.from_pretrained(A )
_UpperCAmelCase : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase : int = PyTorchBenchmark(A , configs=[config] )
_UpperCAmelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case_ ( self : Any ):
_UpperCAmelCase : Tuple = "sshleifer/tiny-gpt2"
_UpperCAmelCase : Tuple = AutoConfig.from_pretrained(A )
_UpperCAmelCase : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase : int = PyTorchBenchmark(A , configs=[config] )
_UpperCAmelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case_ ( self : str ):
_UpperCAmelCase : Optional[int] = "sshleifer/tinier_bart"
_UpperCAmelCase : int = AutoConfig.from_pretrained(A )
_UpperCAmelCase : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase : str = PyTorchBenchmark(A , configs=[config] )
_UpperCAmelCase : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case_ ( self : Dict ):
_UpperCAmelCase : int = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , save_to_csv=A , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A , "inf_time.csv" ) , train_memory_csv_file=os.path.join(A , "train_mem.csv" ) , inference_memory_csv_file=os.path.join(A , "inf_mem.csv" ) , train_time_csv_file=os.path.join(A , "train_time.csv" ) , env_info_csv_file=os.path.join(A , "env.csv" ) , multi_process=A , )
_UpperCAmelCase : int = PyTorchBenchmark(A )
benchmark.run()
self.assertTrue(Path(os.path.join(A , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(A , "train_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(A , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(A , "train_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(A , "env.csv" ) ).exists() )
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : List[Any] = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(A : int ):
self.assertTrue(hasattr(A , "sequential" ) )
self.assertTrue(hasattr(A , "cumulative" ) )
self.assertTrue(hasattr(A , "current" ) )
self.assertTrue(hasattr(A , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A , "log.txt" ) , log_print=A , trace_memory_line_by_line=A , multi_process=A , )
_UpperCAmelCase : int = PyTorchBenchmark(A )
_UpperCAmelCase : Any = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(A , "log.txt" ) ).exists() )
| 289 | 0 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase :
def __init__( self , a__ , a__=1_3 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=True , a__=False , a__=False , a__=False , a__=2 , a__=9_9 , a__=0 , a__=3_2 , a__=5 , a__=4 , a__=0.1 , a__=0.1 , a__=5_1_2 , a__=2 , a__=0.0_2 , a__=2 , a__=4 , a__="last" , a__=True , a__=None , a__=0 , ):
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_lengths
A__ = use_token_type_ids
A__ = use_labels
A__ = gelu_activation
A__ = sinusoidal_embeddings
A__ = causal
A__ = asm
A__ = n_langs
A__ = vocab_size
A__ = n_special
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = summary_type
A__ = use_proj
A__ = scope
A__ = bos_token_id
def snake_case_ ( self):
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
A__ = random_attention_mask([self.batch_size, self.seq_length])
A__ = None
if self.use_input_lengths:
A__ = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
A__ = ids_tensor([self.batch_size] , 2).float()
A__ = ids_tensor([self.batch_size] , self.num_choices)
A__ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def snake_case_ ( self):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
A__ = XLMModel(config=a__)
model.to(a__)
model.eval()
A__ = model(a__ , lengths=a__ , langs=a__)
A__ = model(a__ , langs=a__)
A__ = model(a__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
A__ = XLMWithLMHeadModel(a__)
model.to(a__)
model.eval()
A__ = model(a__ , token_type_ids=a__ , labels=a__)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
A__ = XLMForQuestionAnsweringSimple(a__)
model.to(a__)
model.eval()
A__ = model(a__)
A__ = model(a__ , start_positions=a__ , end_positions=a__)
A__ = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
A__ = XLMForQuestionAnswering(a__)
model.to(a__)
model.eval()
A__ = model(a__)
A__ = model(
a__ , start_positions=a__ , end_positions=a__ , cls_index=a__ , is_impossible=a__ , p_mask=a__ , )
A__ = model(
a__ , start_positions=a__ , end_positions=a__ , cls_index=a__ , is_impossible=a__ , )
((A__) , ) = result_with_labels.to_tuple()
A__ = model(a__ , start_positions=a__ , end_positions=a__)
((A__) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
A__ = XLMForSequenceClassification(a__)
model.to(a__)
model.eval()
A__ = model(a__)
A__ = model(a__ , labels=a__)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
A__ = self.num_labels
A__ = XLMForTokenClassification(a__)
model.to(a__)
model.eval()
A__ = model(a__ , attention_mask=a__ , labels=a__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
A__ = self.num_choices
A__ = XLMForMultipleChoice(config=a__)
model.to(a__)
model.eval()
A__ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
A__ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
A__ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
A__ = model(
a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def snake_case_ ( self):
A__ = self.prepare_config_and_inputs()
(
(
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) ,
) = config_and_inputs
A__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( A__ , A__ , A__ , unittest.TestCase ):
UpperCamelCase__ = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCamelCase__ = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def snake_case_ ( self , a__ , a__ , a__=False):
A__ = super()._prepare_for_class(a__ , a__ , return_labels=a__)
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
A__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a__)
A__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a__)
return inputs_dict
def snake_case_ ( self):
A__ = XLMModelTester(self)
A__ = ConfigTester(self , config_class=a__ , emb_dim=3_7)
def snake_case_ ( self):
self.config_tester.run_common_tests()
def snake_case_ ( self):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*a__)
def snake_case_ ( self):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*a__)
def snake_case_ ( self):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*a__)
def snake_case_ ( self):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*a__)
def snake_case_ ( self):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*a__)
def snake_case_ ( self):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*a__)
def snake_case_ ( self):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*a__)
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ , a__=False , a__=1):
self.assertIsInstance(a__ , a__)
self.assertListEqual(
[isinstance(a__ , a__) for iter_attentions in attentions] , [True] * len(a__))
self.assertEqual(len(a__) , (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(a__):
# adds PAD dummy token
A__ = min_length + idx + 1
A__ = min_length + idx + 1
A__ = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(a__))
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ , a__=False , a__=1):
self.assertIsInstance(a__ , a__)
self.assertListEqual(
[isinstance(a__ , a__) for iter_hidden_states in hidden_states] , [True] * len(a__) , )
self.assertEqual(len(a__) , (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(a__):
# adds PAD dummy token
A__ = min_length + idx + 1
A__ = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(a__) , )
pass
@slow
def snake_case_ ( self):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = XLMModel.from_pretrained(a__)
self.assertIsNotNone(a__)
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
@slow
def snake_case_ ( self):
A__ = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''')
model.to(a__)
A__ = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=a__) # the president
A__ = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
A__ = model.generate(a__ , do_sample=a__)
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , a__)
| 526 |
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
_lowercase = logging.get_logger(__name__)
_lowercase = "T5Config"
def lowerCAmelCase__ ( UpperCamelCase_ : jnp.array , UpperCamelCase_ : int , UpperCamelCase_ : int )-> jnp.ndarray:
A__ = jnp.zeros_like(UpperCamelCase_ )
A__ = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
A__ = shifted_input_ids.at[:, 0].set(UpperCamelCase_ )
A__ = jnp.where(shifted_input_ids == -1_0_0 , UpperCamelCase_ , UpperCamelCase_ )
return shifted_input_ids
class _UpperCAmelCase ( A__ ):
UpperCamelCase__ = '''mt5'''
UpperCamelCase__ = MTaConfig
class _UpperCAmelCase ( A__ ):
UpperCamelCase__ = '''mt5'''
UpperCamelCase__ = MTaConfig
class _UpperCAmelCase ( A__ ):
UpperCamelCase__ = '''mt5'''
UpperCamelCase__ = MTaConfig
| 526 | 1 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( _UpperCamelCase ):
"""simple docstring"""
__A : List[str] = ["image_processor", "tokenizer"]
__A : List[str] = "BridgeTowerImageProcessor"
__A : int = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , lowercase , lowercase) -> int:
'''simple docstring'''
super().__init__(__a , __a)
def __call__( self , lowercase , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ) -> BatchEncoding:
'''simple docstring'''
a__ : Optional[Any] = self.tokenizer(
text=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_token_type_ids=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , )
# add pixel_values + pixel_mask
a__ : int = self.image_processor(
__a , return_tensors=__a , do_normalize=__a , do_center_crop=__a , **__a)
encoding.update(__a)
return encoding
def __lowercase ( self , *lowercase , **lowercase) -> Any:
'''simple docstring'''
return self.tokenizer.batch_decode(*__a , **__a)
def __lowercase ( self , *lowercase , **lowercase) -> Tuple:
'''simple docstring'''
return self.tokenizer.decode(*__a , **__a)
@property
def __lowercase ( self) -> List[str]:
'''simple docstring'''
a__ : Union[str, Any] = self.tokenizer.model_input_names
a__ : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 302 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
lowerCamelCase__ = False
lowerCamelCase__ = True
lowerCamelCase__ = False
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--repo_path",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
lowerCamelCase__ = parser.parse_args()
lowerCamelCase__ = {
"image_size": "sample_size",
"num_res_blocks": "layers_per_block",
"block_channels": "block_out_channels",
"down_blocks": "down_block_types",
"up_blocks": "up_block_types",
"downscale_freq_shift": "freq_shift",
"resnet_num_groups": "norm_num_groups",
"resnet_act_fn": "act_fn",
"resnet_eps": "norm_eps",
"num_head_channels": "attention_head_dim",
}
lowerCamelCase__ = {
"time_steps": "time_proj",
"mid": "mid_block",
"downsample_blocks": "down_blocks",
"upsample_blocks": "up_blocks",
}
lowerCamelCase__ = "" if has_file(args.repo_path, "config.json") else "unet"
with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
lowerCamelCase__ = reader.read()
lowerCamelCase__ = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, "config.json"):
lowerCamelCase__ = UNetaDModel(**config)
else:
lowerCamelCase__ = UNetaDConditionModel if "ldm-text2im-large-256" in args.repo_path else UNetaDModel
lowerCamelCase__ = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
lowerCamelCase__ = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
lowerCamelCase__ = config[key]
del config[key]
lowerCamelCase__ = [k.replace("UNetRes", "") for k in config["down_block_types"]]
lowerCamelCase__ = [k.replace("UNetRes", "") for k in config["up_block_types"]]
if do_only_weights:
lowerCamelCase__ = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))
lowerCamelCase__ = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
continue
lowerCamelCase__ = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(".")[0] == key:
lowerCamelCase__ = param_value
lowerCamelCase__ = True
if not has_changed:
lowerCamelCase__ = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 624 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE : Any = tempfile.mkdtemp()
# fmt: off
SCREAMING_SNAKE_CASE : List[str] = ['', 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
SCREAMING_SNAKE_CASE : str = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
SCREAMING_SNAKE_CASE : Tuple = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE : int = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
SCREAMING_SNAKE_CASE : Tuple = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _UpperCamelCase ( self , **lowercase__ ) -> Any:
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='!' , **SCREAMING_SNAKE_CASE_ )
def _UpperCamelCase ( self , **lowercase__ ) -> Tuple:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='!' , **SCREAMING_SNAKE_CASE_ )
def _UpperCamelCase ( self , **lowercase__ ) -> Optional[int]:
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _UpperCamelCase ( self ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE : str = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE : Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE : int = self.get_image_processor()
SCREAMING_SNAKE_CASE : Union[str, Any] = OwlViTProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : Any = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : str = OwlViTProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : Union[str, Any] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE_ )
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE : Any = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : str = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : Optional[int] = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Optional[Any] = OwlViTProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : Dict = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
SCREAMING_SNAKE_CASE : List[str] = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Any = OwlViTProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : str = 'lower newer'
SCREAMING_SNAKE_CASE : int = processor(text=SCREAMING_SNAKE_CASE_ , return_tensors='np' )
SCREAMING_SNAKE_CASE : Dict = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE : Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Optional[Any] = OwlViTProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : str = 'lower newer'
SCREAMING_SNAKE_CASE : int = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : Optional[int] = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
processor()
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE : str = 'google/owlvit-base-patch32'
SCREAMING_SNAKE_CASE : List[Any] = OwlViTProcessor.from_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : Any = ['cat', 'nasa badge']
SCREAMING_SNAKE_CASE : Any = processor(text=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : int = 16
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
processor()
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE : Dict = 'google/owlvit-base-patch32'
SCREAMING_SNAKE_CASE : str = OwlViTProcessor.from_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : Optional[int] = [['cat', 'nasa badge'], ['person']]
SCREAMING_SNAKE_CASE : List[Any] = processor(text=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : List[Any] = 16
SCREAMING_SNAKE_CASE : str = len(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : Any = max([len(SCREAMING_SNAKE_CASE_ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
processor()
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE : Optional[Any] = 'google/owlvit-base-patch32'
SCREAMING_SNAKE_CASE : int = OwlViTProcessor.from_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : Any = ['cat', 'nasa badge']
SCREAMING_SNAKE_CASE : Optional[Any] = processor(text=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : Any = 16
SCREAMING_SNAKE_CASE : int = inputs['input_ids']
SCREAMING_SNAKE_CASE : str = [
[49_406, 2_368, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49_406, 6_841, 11_301, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Any = OwlViTProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : List[str] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : Any = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : List[Any] = processor(images=SCREAMING_SNAKE_CASE_ , query_images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) , ['query_pixel_values', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
processor()
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE : int = self.get_image_processor()
SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[str] = OwlViTProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE : List[str] = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 720 | '''simple docstring'''
from __future__ import annotations
def __lowerCAmelCase ( a_ , a_ = None ) -> list[list[str]]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = word_bank or []
# create a table
SCREAMING_SNAKE_CASE : int = len(a_ ) + 1
SCREAMING_SNAKE_CASE : list[list[list[str]]] = []
for _ in range(a_ ):
table.append([] )
# seed value
SCREAMING_SNAKE_CASE : List[Any] = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(a_ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(a_ )] == word:
SCREAMING_SNAKE_CASE : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(a_ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(a_ )]:
combination.reverse()
return table[len(a_ )]
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)
| 179 | 0 |
import os
# Precomputes a list of the 100 first triangular numbers
SCREAMING_SNAKE_CASE__ : Optional[int] = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = os.path.dirname(os.path.realpath(lowercase__ ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(lowercase__ , 'words.txt' )
SCREAMING_SNAKE_CASE__ : int = ''
with open(lowercase__ ) as f:
SCREAMING_SNAKE_CASE__ : int = f.readline()
SCREAMING_SNAKE_CASE__ : List[Any] = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
SCREAMING_SNAKE_CASE__ : Dict = [
word
for word in [sum(ord(lowercase__ ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(lowercase__ )
if __name__ == "__main__":
print(solution())
| 85 | import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
SCREAMING_SNAKE_CASE__ : Any = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
SCREAMING_SNAKE_CASE__ : Tuple = json.load(f)
@require_torch
class snake_case ( unittest.TestCase ):
def __lowercase( self : List[str] , a_ : Any )-> str:
"""simple docstring"""
return FSMTTokenizer.from_pretrained(a_ )
def __lowercase( self : int , a_ : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = FSMTForConditionalGeneration.from_pretrained(a_ ).to(a_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def __lowercase( self : int , a_ : Optional[int] , a_ : str )-> List[str]:
"""simple docstring"""
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
SCREAMING_SNAKE_CASE__ : Any = F'''facebook/wmt19-{pair}'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_tokenizer(a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_model(a_ )
SCREAMING_SNAKE_CASE__ : int = bleu_data[pair]['src']
SCREAMING_SNAKE_CASE__ : Optional[int] = bleu_data[pair]['tgt']
SCREAMING_SNAKE_CASE__ : Any = tokenizer(a_ , return_tensors='pt' , truncation=a_ , padding='longest' ).to(a_ )
SCREAMING_SNAKE_CASE__ : int = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.batch_decode(
a_ , skip_special_tokens=a_ , clean_up_tokenization_spaces=a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = calculate_bleu(a_ , a_ )
print(a_ )
self.assertGreaterEqual(scores['bleu'] , a_ )
| 85 | 1 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase=False ) -> Union[str, Any]:
try:
snake_case : Any = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
snake_case : Any = default
else:
# KEY is set, convert it to True or False.
try:
snake_case : List[Any] = strtobool(lowercase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"""If set, {key} must be yes or no.""" )
return _value
lowerCamelCase : List[str] = parse_flag_from_env('RUN_SLOW', default=False)
lowerCamelCase : Union[str, Any] = parse_flag_from_env('RUN_REMOTE', default=False)
lowerCamelCase : str = parse_flag_from_env('RUN_LOCAL', default=True)
lowerCamelCase : int = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
lowerCamelCase : List[str] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
lowerCamelCase : Optional[int] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
lowerCamelCase : Union[str, Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
lowerCamelCase : List[str] = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
lowerCamelCase : str = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
lowerCamelCase : Tuple = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
lowerCamelCase : List[Any] = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[str]:
try:
import faiss # noqa
except ImportError:
snake_case : Tuple = unittest.skip("""test requires faiss""" )(lowercase )
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
try:
import regex # noqa
except ImportError:
snake_case : Tuple = unittest.skip("""test requires regex""" )(lowercase )
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Dict:
try:
import elasticsearch # noqa
except ImportError:
snake_case : Tuple = unittest.skip("""test requires elasticsearch""" )(lowercase )
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[Any]:
try:
import sqlalchemy # noqa
except ImportError:
snake_case : Tuple = unittest.skip("""test requires sqlalchemy""" )(lowercase )
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[str]:
if not config.TORCH_AVAILABLE:
snake_case : Optional[Any] = unittest.skip("""test requires PyTorch""" )(lowercase )
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
if not config.TF_AVAILABLE:
snake_case : Union[str, Any] = unittest.skip("""test requires TensorFlow""" )(lowercase )
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Dict:
if not config.JAX_AVAILABLE:
snake_case : List[str] = unittest.skip("""test requires JAX""" )(lowercase )
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[Any]:
if not config.PIL_AVAILABLE:
snake_case : List[Any] = unittest.skip("""test requires Pillow""" )(lowercase )
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Tuple:
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("""test requires transformers""" )(lowercase )
else:
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Any:
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("""test requires tiktoken""" )(lowercase )
else:
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[str]:
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("""test requires spacy""" )(lowercase )
else:
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Any:
def _require_spacy_model(lowercase ):
try:
import spacy # noqa F401
spacy.load(lowercase )
except ImportError:
return unittest.skip("""test requires spacy""" )(lowercase )
except OSError:
return unittest.skip("""test requires spacy model '{}'""".format(lowercase ) )(lowercase )
else:
return test_case
return _require_spacy_model
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Optional[int]:
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("""test requires pyspark""" )(lowercase )
else:
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Optional[Any]:
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("""test requires joblibspark""" )(lowercase )
else:
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
if not _run_slow_tests or _run_slow_tests == 0:
snake_case : Optional[int] = unittest.skip("""test is slow""" )(lowercase )
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
if not _run_local_tests or _run_local_tests == 0:
snake_case : Tuple = unittest.skip("""test is local""" )(lowercase )
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Tuple:
if not _run_packaged_tests or _run_packaged_tests == 0:
snake_case : Tuple = unittest.skip("""test is packaged""" )(lowercase )
return test_case
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[Any]:
if not _run_remote_tests or _run_remote_tests == 0:
snake_case : Dict = unittest.skip("""test requires remote""" )(lowercase )
return test_case
def SCREAMING_SNAKE_CASE__ ( *lowercase ) -> int:
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(lowercase ) and name.startswith("""test""" ):
for decorator in decorators:
snake_case : Any = decorator(lowercase )
setattr(cls ,lowercase ,lowercase )
return cls
return decorate
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = 0
_snake_case = 1
_snake_case = 2
@contextmanager
def SCREAMING_SNAKE_CASE__ ( lowercase=OfflineSimulationMode.CONNECTION_FAILS ,lowercase=1E-16 ) -> List[Any]:
snake_case : Any = requests.Session().request
def timeout_request(lowercase ,lowercase ,lowercase ,**lowercase ):
# Change the url to an invalid url so that the connection hangs
snake_case : List[Any] = """https://10.255.255.1"""
if kwargs.get("""timeout""" ) is None:
raise RequestWouldHangIndefinitelyError(
f"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
snake_case : Any = timeout
try:
return online_request(lowercase ,lowercase ,**lowercase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
snake_case : Dict = url
snake_case : Optional[int] = e.args[0]
snake_case : List[Any] = (max_retry_error.args[0].replace("""10.255.255.1""" ,f"""OfflineMock[{url}]""" ),)
snake_case : Dict = (max_retry_error,)
raise
def raise_connection_error(lowercase ,lowercase ,**lowercase ):
raise requests.ConnectionError("""Offline mode is enabled.""" ,request=lowercase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("""requests.Session.send""" ,lowercase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("""requests.Session.request""" ,lowercase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("""datasets.config.HF_DATASETS_OFFLINE""" ,lowercase ):
yield
else:
raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
@contextmanager
def SCREAMING_SNAKE_CASE__ ( *lowercase ,**lowercase ) -> Optional[Any]:
snake_case : str = str(Path().resolve() )
with tempfile.TemporaryDirectory(*lowercase ,**lowercase ) as tmp_dir:
try:
os.chdir(lowercase )
yield
finally:
os.chdir(lowercase )
@contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
import gc
gc.collect()
snake_case : Tuple = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
import gc
gc.collect()
snake_case : List[str] = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> List[Any]:
return deepcopy(lowercase ).integers(0 ,100 ,10 ).tolist() == deepcopy(lowercase ).integers(0 ,100 ,10 ).tolist()
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
import decorator
from requests.exceptions import HTTPError
def _wrapper(lowercase ,*lowercase ,**lowercase ):
try:
return func(*lowercase ,**lowercase )
except HTTPError as err:
if str(lowercase ).startswith("""500""" ) or str(lowercase ).startswith("""502""" ):
pytest.xfail(str(lowercase ) )
raise err
return decorator.decorator(_wrapper ,lowercase )
class __lowercase :
"""simple docstring"""
def __init__( self , A , A , A ) -> Optional[int]:
snake_case : List[Any] = returncode
snake_case : Tuple = stdout
snake_case : Dict = stderr
async def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> List[Any]:
while True:
snake_case : List[str] = await stream.readline()
if line:
callback(lowercase )
else:
break
async def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase=None ,lowercase=None ,lowercase=None ,lowercase=False ,lowercase=False ) -> _RunOutput:
if echo:
print("""\nRunning: """ ,""" """.join(lowercase ) )
snake_case : Tuple = await asyncio.create_subprocess_exec(
cmd[0] ,*cmd[1:] ,stdin=lowercase ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=lowercase ,)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
snake_case : str = []
snake_case : List[str] = []
def tee(lowercase ,lowercase ,lowercase ,lowercase="" ):
snake_case : List[str] = line.decode("""utf-8""" ).rstrip()
sink.append(lowercase )
if not quiet:
print(lowercase ,lowercase ,file=lowercase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout ,lambda lowercase : tee(lowercase ,lowercase ,sys.stdout ,label="""stdout:""" ) ),
_read_stream(p.stderr ,lambda lowercase : tee(lowercase ,lowercase ,sys.stderr ,label="""stderr:""" ) ),
] ,timeout=lowercase ,)
return _RunOutput(await p.wait() ,lowercase ,lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase=None ,lowercase=None ,lowercase=180 ,lowercase=False ,lowercase=True ) -> _RunOutput:
snake_case : Dict = asyncio.get_event_loop()
snake_case : Optional[int] = loop.run_until_complete(
_stream_subprocess(lowercase ,env=lowercase ,stdin=lowercase ,timeout=lowercase ,quiet=lowercase ,echo=lowercase ) )
snake_case : Any = """ """.join(lowercase )
if result.returncode > 0:
snake_case : List[str] = """\n""".join(result.stderr )
raise RuntimeError(
f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
f"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"""'{cmd_str}' produced no output.""" )
return result
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
snake_case : Optional[Any] = os.environ.get("""PYTEST_XDIST_WORKER""" ,"""gw0""" )
snake_case : Tuple = re.sub(R"""^gw""" ,"""""" ,lowercase ,0 ,re.M )
return int(lowercase )
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
snake_case : Dict = 29500
snake_case : int = pytest_xdist_worker_id()
return port + uniq_delta
| 684 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
snake_case : Optional[Any] = hex_num.strip()
if not hex_num:
raise ValueError("""No value was passed to the function""" )
snake_case : Any = hex_num[0] == """-"""
if is_negative:
snake_case : int = hex_num[1:]
try:
snake_case : List[Any] = int(lowercase ,16 )
except ValueError:
raise ValueError("""Invalid value was passed to the function""" )
snake_case : Dict = """"""
while int_num > 0:
snake_case : Dict = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(("""-""" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 1 |
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( A : int , A : int ) -> tuple[int, int]:
if b == 0:
return (1, 0)
((UpperCAmelCase_) , (UpperCAmelCase_)) : int = extended_euclid(A , a % b )
UpperCAmelCase_ : Tuple = a // b
return (y, x - k * y)
def __UpperCAmelCase ( A : int , A : int , A : int , A : int ) -> int:
((UpperCAmelCase_) , (UpperCAmelCase_)) : int = extended_euclid(A , A )
UpperCAmelCase_ : Optional[Any] = na * na
UpperCAmelCase_ : str = ra * x * na + ra * y * na
return (n % m + m) % m
def __UpperCAmelCase ( A : int , A : int ) -> int:
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[Any] = extended_euclid(A , A )
if b < 0:
UpperCAmelCase_ : Dict = (b % n + n) % n
return b
def __UpperCAmelCase ( A : int , A : int , A : int , A : int ) -> int:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = invert_modulo(A , A ), invert_modulo(A , A )
UpperCAmelCase_ : List[str] = na * na
UpperCAmelCase_ : int = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='chinese_remainder_theorem', verbose=True)
testmod(name='chinese_remainder_theorem2', verbose=True)
testmod(name='invert_modulo', verbose=True)
testmod(name='extended_euclid', verbose=True)
| 541 |
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
_UpperCamelCase : str = datasets.utils.logging.get_logger(__name__)
@dataclass
class snake_case__ ( datasets.BuilderConfig):
a_ = None
a_ = "utf-8"
a_ = None
a_ = None
a_ = True # deprecated
a_ = None # deprecated
a_ = 10 << 20 # 10MB
a_ = None
class snake_case__ ( datasets.ArrowBasedBuilder):
a_ = JsonConfig
def A ( self : Union[str, Any] ) -> Optional[int]:
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
UpperCAmelCase_ : Union[str, Any] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
def A ( self : Optional[int] , _A : List[str] ) -> int:
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
UpperCAmelCase_ : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_A , (str, list, tuple) ):
UpperCAmelCase_ : List[Any] = data_files
if isinstance(_A , _A ):
UpperCAmelCase_ : List[str] = [files]
UpperCAmelCase_ : List[str] = [dl_manager.iter_files(_A ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
UpperCAmelCase_ : Tuple = []
for split_name, files in data_files.items():
if isinstance(_A , _A ):
UpperCAmelCase_ : Optional[Any] = [files]
UpperCAmelCase_ : List[Any] = [dl_manager.iter_files(_A ) for file in files]
splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'''files''': files} ) )
return splits
def A ( self : List[Any] , _A : pa.Table ) -> pa.Table:
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
UpperCAmelCase_ : Union[str, Any] = self.config.features.arrow_schema.field(_A ).type
UpperCAmelCase_ : int = pa_table.append_column(_A , pa.array([None] * len(_A ) , type=_A ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCAmelCase_ : Dict = table_cast(_A , self.config.features.arrow_schema )
return pa_table
def A ( self : Optional[Any] , _A : List[Any] ) -> Optional[int]:
for file_idx, file in enumerate(itertools.chain.from_iterable(_A ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(_A , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
UpperCAmelCase_ : Optional[int] = json.load(_A )
# We keep only the field we are interested in
UpperCAmelCase_ : Any = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(_A , (list, tuple) ):
UpperCAmelCase_ : Optional[int] = set().union(*[row.keys() for row in dataset] )
UpperCAmelCase_ : str = {col: [row.get(_A ) for row in dataset] for col in keys}
else:
UpperCAmelCase_ : Tuple = dataset
UpperCAmelCase_ : List[str] = pa.Table.from_pydict(_A )
yield file_idx, self._cast_table(_A )
# If the file has one json object per line
else:
with open(_A , '''rb''' ) as f:
UpperCAmelCase_ : Tuple = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
UpperCAmelCase_ : Optional[Any] = max(self.config.chunksize // 32 , 16 << 10 )
UpperCAmelCase_ : Tuple = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
UpperCAmelCase_ : Optional[Any] = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(_A )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
UpperCAmelCase_ : List[Any] = batch.decode(self.config.encoding , errors=_A ).encode('''utf-8''' )
try:
while True:
try:
UpperCAmelCase_ : int = paj.read_json(
io.BytesIO(_A ) , read_options=paj.ReadOptions(block_size=_A ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(_A , pa.ArrowInvalid )
and "straddling" not in str(_A )
or block_size > len(_A )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"Batch of {len(_A )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}." )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
_A , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
UpperCAmelCase_ : List[Any] = json.load(_A )
except json.JSONDecodeError:
logger.error(F"Failed to read file '{file}' with error {type(_A )}: {e}" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(_A , _A ): # list is the only sequence type supported in JSON
try:
UpperCAmelCase_ : Optional[int] = set().union(*[row.keys() for row in dataset] )
UpperCAmelCase_ : Any = {col: [row.get(_A ) for row in dataset] for col in keys}
UpperCAmelCase_ : Optional[int] = pa.Table.from_pydict(_A )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"Failed to read file '{file}' with error {type(_A )}: {e}" )
raise ValueError(F"Not able to read records in the JSON file at {file}." ) from None
yield file_idx, self._cast_table(_A )
break
else:
logger.error(F"Failed to read file '{file}' with error {type(_A )}: {e}" )
raise ValueError(
F"Not able to read records in the JSON file at {file}. "
F"You should probably indicate the field of the JSON file containing your records. "
F"This JSON file contain the following fields: {str(list(dataset.keys() ) )}. "
F"Select the correct one and provide it as `field='XXX'` to the dataset loading method. " ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_A )
batch_idx += 1
| 541 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class _UpperCAmelCase ( _snake_case):
def lowerCamelCase__ ( self , snake_case_ ):
return 0.0
def a__ ( a : np.ndarray , a : int ):
"""simple docstring"""
_snake_case : Any = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
_snake_case : Union[str, Any] = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def a__ ( a : FilterType , a : int ):
"""simple docstring"""
_snake_case : List[Any] = 512
_snake_case : Dict = [1] + [0] * (size - 1)
_snake_case : Union[str, Any] = [filter_type.process(a ) for item in inputs]
_snake_case : List[str] = [0] * (samplerate - size) # zero-padding
outputs += filler
_snake_case : List[Any] = np.abs(np.fft.fft(a ) )
_snake_case : List[Any] = 20 * np.logaa(a )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
# Display within reasonable bounds
_snake_case : Tuple = get_bounds(a , a )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel("Gain (dB)" )
plt.plot(a )
plt.show()
def a__ ( a : FilterType , a : int ):
"""simple docstring"""
_snake_case : str = 512
_snake_case : Optional[Any] = [1] + [0] * (size - 1)
_snake_case : Optional[Any] = [filter_type.process(a ) for item in inputs]
_snake_case : Union[str, Any] = [0] * (samplerate - size) # zero-padding
outputs += filler
_snake_case : Dict = np.angle(np.fft.fft(a ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel("Phase shift (Radians)" )
plt.plot(np.unwrap(a , -2 * pi ) )
plt.show()
| 87 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_a : Optional[int] = logging.get_logger(__name__)
_a : List[str] = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class _UpperCAmelCase ( _snake_case , _snake_case):
__lowercase : List[Any] = """convnextv2"""
def __init__( self , snake_case_=3 , snake_case_=4 , snake_case_=4 , snake_case_=None , snake_case_=None , snake_case_="gelu" , snake_case_=0.02 , snake_case_=1E-12 , snake_case_=0.0 , snake_case_=2_24 , snake_case_=None , snake_case_=None , **snake_case_ , ):
super().__init__(**snake_case_ )
_snake_case : Tuple = num_channels
_snake_case : Optional[int] = patch_size
_snake_case : Tuple = num_stages
_snake_case : int = [96, 1_92, 3_84, 7_68] if hidden_sizes is None else hidden_sizes
_snake_case : str = [3, 3, 9, 3] if depths is None else depths
_snake_case : int = hidden_act
_snake_case : Tuple = initializer_range
_snake_case : Union[str, Any] = layer_norm_eps
_snake_case : Optional[int] = drop_path_rate
_snake_case : Union[str, Any] = image_size
_snake_case : List[Any] = ["stem"] + [F'stage{idx}' for idx in range(1 , len(self.depths ) + 1 )]
_snake_case , _snake_case : Dict = get_aligned_output_features_output_indices(
out_features=snake_case_ , out_indices=snake_case_ , stage_names=self.stage_names )
| 87 | 1 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class UpperCamelCase__ ( UpperCamelCase__ ):
a__ : List[str] = ComputeEnvironment.AMAZON_SAGEMAKER
a__ : Tuple = True
a__ : Any = 'ml.p3.2xlarge'
a__ : Tuple = 'accelerate_sagemaker_execution_role'
a__ : List[Any] = 'hf-sm'
a__ : Dict = 'us-east-1'
a__ : List[Any] = 1
a__ : Union[str, Any] = 'accelerate-sagemaker-1'
a__ : str = '1.6'
a__ : Optional[Any] = '4.4'
a__ : Optional[int] = 'train.py'
a__ : List[str] = [
'--model_name_or_path',
'bert',
'--do_train',
'False',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
a__ : Optional[int] = [
'--model_name_or_path',
'bert',
'--do_train',
'--do_test',
'False',
'--do_predict',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
class UpperCamelCase__ ( unittest.TestCase ):
def __lowercase( self : Union[str, Any] ) -> List[Any]:
# If no defaults are changed, `to_kwargs` returns an empty dict.
UpperCamelCase__ : List[str] = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args['''model_name_or_path'''], __lowerCamelCase )
assert isinstance(converted_args['''do_train'''], __lowerCamelCase )
assert isinstance(converted_args['''epochs'''], __lowerCamelCase )
assert isinstance(converted_args['''learning_rate'''], __lowerCamelCase )
assert isinstance(converted_args['''max_steps'''], __lowerCamelCase )
with pytest.raises(__lowerCamelCase ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 344 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> bool:
snake_case : set[int] = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
snake_case : set[int] = set()
return any(
node not in visited and depth_first_search(lowercase ,lowercase ,lowercase ,lowercase )
for node in graph )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ) -> bool:
visited.add(lowercase )
rec_stk.add(lowercase )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(lowercase ,lowercase ,lowercase ,lowercase ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(lowercase )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 587 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__a : List[str] = "ClapFeatureExtractor"
__a : Optional[int] = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self : str , lowercase : Optional[Any] , lowercase : Tuple ) -> Optional[int]:
'''simple docstring'''
super().__init__(lowercase , lowercase )
def __call__( self : Dict , lowercase : List[Any]=None , lowercase : Union[str, Any]=None , lowercase : List[str]=None , **lowercase : Optional[Any] ) -> int:
'''simple docstring'''
UpperCamelCase__ = kwargs.pop("""sampling_rate""" , lowercase )
if text is None and audios is None:
raise ValueError("""You have to specify either text or audios. Both cannot be none.""" )
if text is not None:
UpperCamelCase__ = self.tokenizer(lowercase , return_tensors=lowercase , **lowercase )
if audios is not None:
UpperCamelCase__ = self.feature_extractor(
lowercase , sampling_rate=lowercase , return_tensors=lowercase , **lowercase )
if text is not None and audios is not None:
UpperCamelCase__ = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase ) , tensor_type=lowercase )
def A ( self : int , *lowercase : Any , **lowercase : List[Any] ) -> List[str]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def A ( self : Tuple , *lowercase : int , **lowercase : Optional[int] ) -> List[str]:
'''simple docstring'''
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def A ( self : Optional[int] ) -> str:
'''simple docstring'''
UpperCamelCase__ = self.tokenizer.model_input_names
UpperCamelCase__ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 708 |
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
lowerCamelCase_ : Union[str, Any] = re.compile(r'''\b(a|an|the)\b''', re.UNICODE)
lowerCamelCase_ : int = None
def __magic_name__( ):
'''simple docstring'''
UpperCamelCase__ = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" )
parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" )
parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" )
parser.add_argument(
"""--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" )
parser.add_argument(
"""--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" )
parser.add_argument(
"""--na-prob-thresh""" , """-t""" , type=_A , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , )
parser.add_argument(
"""--out-image-dir""" , """-p""" , metavar="""out_images""" , default=_A , help="""Save precision-recall curves to directory.""" )
parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def __magic_name__( _A ):
'''simple docstring'''
UpperCamelCase__ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
UpperCamelCase__ = bool(qa["""answers"""]["""text"""] )
return qid_to_has_ans
def __magic_name__( _A ):
'''simple docstring'''
def remove_articles(_A ):
return ARTICLES_REGEX.sub(""" """ , _A )
def white_space_fix(_A ):
return " ".join(text.split() )
def remove_punc(_A ):
UpperCamelCase__ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_A ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_A ) ) ) )
def __magic_name__( _A ):
'''simple docstring'''
if not s:
return []
return normalize_answer(_A ).split()
def __magic_name__( _A , _A ):
'''simple docstring'''
return int(normalize_answer(_A ) == normalize_answer(_A ) )
def __magic_name__( _A , _A ):
'''simple docstring'''
UpperCamelCase__ = get_tokens(_A )
UpperCamelCase__ = get_tokens(_A )
UpperCamelCase__ = collections.Counter(_A ) & collections.Counter(_A )
UpperCamelCase__ = sum(common.values() )
if len(_A ) == 0 or len(_A ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
UpperCamelCase__ = 1.0 * num_same / len(_A )
UpperCamelCase__ = 1.0 * num_same / len(_A )
UpperCamelCase__ = (2 * precision * recall) / (precision + recall)
return fa
def __magic_name__( _A , _A ):
'''simple docstring'''
UpperCamelCase__ = {}
UpperCamelCase__ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
UpperCamelCase__ = qa["""id"""]
UpperCamelCase__ = [t for t in qa["""answers"""]["""text"""] if normalize_answer(_A )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
UpperCamelCase__ = [""""""]
if qid not in preds:
print(f"Missing prediction for {qid}" )
continue
UpperCamelCase__ = preds[qid]
# Take max over all gold answers
UpperCamelCase__ = max(compute_exact(_A , _A ) for a in gold_answers )
UpperCamelCase__ = max(compute_fa(_A , _A ) for a in gold_answers )
return exact_scores, fa_scores
def __magic_name__( _A , _A , _A , _A ):
'''simple docstring'''
UpperCamelCase__ = {}
for qid, s in scores.items():
UpperCamelCase__ = na_probs[qid] > na_prob_thresh
if pred_na:
UpperCamelCase__ = float(not qid_to_has_ans[qid] )
else:
UpperCamelCase__ = s
return new_scores
def __magic_name__( _A , _A , _A=None ):
'''simple docstring'''
if not qid_list:
UpperCamelCase__ = len(_A )
return collections.OrderedDict(
[
("""exact""", 1_0_0.0 * sum(exact_scores.values() ) / total),
("""f1""", 1_0_0.0 * sum(fa_scores.values() ) / total),
("""total""", total),
] )
else:
UpperCamelCase__ = len(_A )
return collections.OrderedDict(
[
("""exact""", 1_0_0.0 * sum(exact_scores[k] for k in qid_list ) / total),
("""f1""", 1_0_0.0 * sum(fa_scores[k] for k in qid_list ) / total),
("""total""", total),
] )
def __magic_name__( _A , _A , _A ):
'''simple docstring'''
for k in new_eval:
UpperCamelCase__ = new_eval[k]
def __magic_name__( _A , _A , _A , _A ):
'''simple docstring'''
plt.step(_A , _A , color="""b""" , alpha=0.2 , where="""post""" )
plt.fill_between(_A , _A , step="""post""" , alpha=0.2 , color="""b""" )
plt.xlabel("""Recall""" )
plt.ylabel("""Precision""" )
plt.xlim([0.0, 1.0_5] )
plt.ylim([0.0, 1.0_5] )
plt.title(_A )
plt.savefig(_A )
plt.clf()
def __magic_name__( _A , _A , _A , _A , _A=None , _A=None ):
'''simple docstring'''
UpperCamelCase__ = sorted(_A , key=lambda _A : na_probs[k] )
UpperCamelCase__ = 0.0
UpperCamelCase__ = 1.0
UpperCamelCase__ = 0.0
UpperCamelCase__ = [1.0]
UpperCamelCase__ = [0.0]
UpperCamelCase__ = 0.0
for i, qid in enumerate(_A ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
UpperCamelCase__ = true_pos / float(i + 1 )
UpperCamelCase__ = true_pos / float(_A )
if i == len(_A ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(_A )
recalls.append(_A )
if out_image:
plot_pr_curve(_A , _A , _A , _A )
return {"ap": 1_0_0.0 * avg_prec}
def __magic_name__( _A , _A , _A , _A , _A , _A ):
'''simple docstring'''
if out_image_dir and not os.path.exists(_A ):
os.makedirs(_A )
UpperCamelCase__ = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
UpperCamelCase__ = make_precision_recall_eval(
_A , _A , _A , _A , out_image=os.path.join(_A , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , )
UpperCamelCase__ = make_precision_recall_eval(
_A , _A , _A , _A , out_image=os.path.join(_A , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , )
UpperCamelCase__ = {k: float(_A ) for k, v in qid_to_has_ans.items()}
UpperCamelCase__ = make_precision_recall_eval(
_A , _A , _A , _A , out_image=os.path.join(_A , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" , )
merge_eval(_A , _A , """pr_exact""" )
merge_eval(_A , _A , """pr_f1""" )
merge_eval(_A , _A , """pr_oracle""" )
def __magic_name__( _A , _A , _A , _A ):
'''simple docstring'''
if not qid_list:
return
UpperCamelCase__ = [na_probs[k] for k in qid_list]
UpperCamelCase__ = np.ones_like(_A ) / float(len(_A ) )
plt.hist(_A , weights=_A , bins=20 , range=(0.0, 1.0) )
plt.xlabel("""Model probability of no-answer""" )
plt.ylabel("""Proportion of dataset""" )
plt.title(f"Histogram of no-answer probability: {name}" )
plt.savefig(os.path.join(_A , f"na_prob_hist_{name}.png" ) )
plt.clf()
def __magic_name__( _A , _A , _A , _A ):
'''simple docstring'''
UpperCamelCase__ = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
UpperCamelCase__ = num_no_ans
UpperCamelCase__ = cur_score
UpperCamelCase__ = 0.0
UpperCamelCase__ = sorted(_A , key=lambda _A : na_probs[k] )
for i, qid in enumerate(_A ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
UpperCamelCase__ = scores[qid]
else:
if preds[qid]:
UpperCamelCase__ = -1
else:
UpperCamelCase__ = 0
cur_score += diff
if cur_score > best_score:
UpperCamelCase__ = cur_score
UpperCamelCase__ = na_probs[qid]
return 1_0_0.0 * best_score / len(_A ), best_thresh
def __magic_name__( _A , _A , _A , _A , _A , _A ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ = find_best_thresh(_A , _A , _A , _A )
UpperCamelCase__ , UpperCamelCase__ = find_best_thresh(_A , _A , _A , _A )
UpperCamelCase__ = best_exact
UpperCamelCase__ = exact_thresh
UpperCamelCase__ = best_fa
UpperCamelCase__ = fa_thresh
def __magic_name__( ):
'''simple docstring'''
with open(OPTS.data_file ) as f:
UpperCamelCase__ = json.load(_A )
UpperCamelCase__ = dataset_json["""data"""]
with open(OPTS.pred_file ) as f:
UpperCamelCase__ = json.load(_A )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
UpperCamelCase__ = json.load(_A )
else:
UpperCamelCase__ = {k: 0.0 for k in preds}
UpperCamelCase__ = make_qid_to_has_ans(_A ) # maps qid to True/False
UpperCamelCase__ = [k for k, v in qid_to_has_ans.items() if v]
UpperCamelCase__ = [k for k, v in qid_to_has_ans.items() if not v]
UpperCamelCase__ , UpperCamelCase__ = get_raw_scores(_A , _A )
UpperCamelCase__ = apply_no_ans_threshold(_A , _A , _A , OPTS.na_prob_thresh )
UpperCamelCase__ = apply_no_ans_threshold(_A , _A , _A , OPTS.na_prob_thresh )
UpperCamelCase__ = make_eval_dict(_A , _A )
if has_ans_qids:
UpperCamelCase__ = make_eval_dict(_A , _A , qid_list=_A )
merge_eval(_A , _A , """HasAns""" )
if no_ans_qids:
UpperCamelCase__ = make_eval_dict(_A , _A , qid_list=_A )
merge_eval(_A , _A , """NoAns""" )
if OPTS.na_prob_file:
find_all_best_thresh(_A , _A , _A , _A , _A , _A )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(_A , _A , _A , _A , _A , OPTS.out_image_dir )
histogram_na_prob(_A , _A , OPTS.out_image_dir , """hasAns""" )
histogram_na_prob(_A , _A , OPTS.out_image_dir , """noAns""" )
if OPTS.out_file:
with open(OPTS.out_file , """w""" ) as f:
json.dump(_A , _A )
else:
print(json.dumps(_A , indent=2 ) )
if __name__ == "__main__":
lowerCamelCase_ : str = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
| 265 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_lowerCAmelCase )
class __a ( _lowerCAmelCase ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
UpperCamelCase_ : str = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCamelCase_ : ClassVar[Features] = Features({'''text''': Value('''string''' )} )
UpperCamelCase_ : ClassVar[Features] = Features({'''summary''': Value('''string''' )} )
UpperCamelCase_ : str = "text"
UpperCamelCase_ : str = "summary"
@property
def _SCREAMING_SNAKE_CASE ( self : str )-> Dict[str, str]:
"""simple docstring"""
return {self.text_column: "text", self.summary_column: "summary"}
| 554 |
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
def lowerCamelCase__ ( )-> Tuple:
"""simple docstring"""
UpperCamelCase = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name" , type=UpperCAmelCase_ , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
parser.add_argument(
"--dataset_config" , type=UpperCAmelCase_ , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path" , type=UpperCAmelCase_ , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
parser.add_argument(
"--shard_size" , type=UpperCAmelCase_ , default=10_00 , help="Number of entries to go in a single shard." , )
parser.add_argument("--split" , type=UpperCAmelCase_ , default="train" , choices=["train", "test", "validation"] )
parser.add_argument(
"--limit" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="Limit the number of shards (used for debugging)." , )
parser.add_argument(
"--max_length" , type=UpperCAmelCase_ , default=5_12 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8." , )
parser.add_argument(
"--output_dir" , default="tf-tpu" , type=UpperCAmelCase_ , help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket." , )
UpperCamelCase = parser.parse_args()
return args
def lowerCamelCase__ ( UpperCAmelCase_ )-> Optional[int]:
"""simple docstring"""
def fn(UpperCAmelCase_ ):
return tokenizer(examples["text"] )
return fn
def lowerCamelCase__ ( UpperCAmelCase_ )-> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = []
for i in range(len(tokenized_data["input_ids"] ) ):
UpperCamelCase = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
UpperCamelCase = tf.train.Features(feature=UpperCAmelCase_ )
UpperCamelCase = tf.train.Example(features=UpperCAmelCase_ )
UpperCamelCase = example.SerializeToString()
records.append(UpperCAmelCase_ )
return records
def lowerCamelCase__ ( UpperCAmelCase_ )-> int:
"""simple docstring"""
UpperCamelCase = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
UpperCamelCase = min(len(UpperCAmelCase_ ) , args.limit )
UpperCamelCase = dataset.select(range(UpperCAmelCase_ ) )
print(F"Limiting the dataset to {args.limit} entries." )
UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
UpperCamelCase = os.path.join(args.output_dir , args.split )
if not os.path.exists(UpperCAmelCase_ ):
os.makedirs(UpperCAmelCase_ )
else:
UpperCamelCase = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
UpperCamelCase = tokenize_function(UpperCAmelCase_ )
UpperCamelCase = dataset.map(UpperCAmelCase_ , batched=UpperCAmelCase_ , num_proc=4 , remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(UpperCAmelCase_ ):
# Concatenate all texts.
UpperCamelCase = {k: sum(examples[k] , [] ) for k in examples.keys()}
UpperCamelCase = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
UpperCamelCase = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
UpperCamelCase = {
k: [t[i : i + args.max_length] for i in range(0 , UpperCAmelCase_ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
UpperCamelCase = dataset_tokenized.map(UpperCAmelCase_ , batched=UpperCAmelCase_ , batch_size=10_00 , num_proc=4 )
UpperCamelCase = 0
UpperCamelCase = 0
for shard in range(0 , len(UpperCAmelCase_ ) , args.shard_size ):
UpperCamelCase = grouped_dataset[shard : shard + args.shard_size]
UpperCamelCase = len(dataset_snapshot["input_ids"] )
UpperCamelCase = os.path.join(UpperCAmelCase_ , F"dataset-{shard_count}-{records_containing}.tfrecord" )
UpperCamelCase = get_serialized_examples(UpperCAmelCase_ )
with tf.io.TFRecordWriter(UpperCAmelCase_ ) as out_file:
for i in range(len(UpperCAmelCase_ ) ):
UpperCamelCase = serialized_examples[i]
out_file.write(UpperCAmelCase_ )
print("Wrote file {} containing {} records".format(UpperCAmelCase_ , UpperCAmelCase_ ) )
shard_count += 1
total_records += records_containing
with open(F"split-{args.split}-records-count.txt" , "w" ) as f:
print(F"Total {args.split} records: {total_records}" , file=UpperCAmelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = parse_args()
main(args)
| 554 | 1 |
'''simple docstring'''
class UpperCAmelCase_ : # Public class to implement a graph
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : list[list[bool]] ) -> None:
'''simple docstring'''
lowercase : Optional[int] =row
lowercase : Union[str, Any] =col
lowercase : List[str] =graph
def A__ ( self : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : list[list[bool]] ) -> bool:
'''simple docstring'''
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def A__ ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : list[list[bool]] ) -> None:
'''simple docstring'''
lowercase : List[str] =[-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
lowercase : Dict =[-1, 0, 1, -1, 1, -1, 0, 1]
lowercase : List[str] =True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCAmelCase ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCAmelCase )
def A__ ( self : Tuple ) -> int: # And finally, count all islands.
'''simple docstring'''
lowercase : Tuple =[[False for j in range(self.COL )] for i in range(self.ROW )]
lowercase : List[Any] =0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
count += 1
return count
| 8 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : List[str] , ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =parent
lowercase : Any =13
lowercase : Any =7
lowercase : Optional[int] =True
lowercase : Optional[int] =True
lowercase : Tuple =False
lowercase : Optional[Any] =True
lowercase : Dict =99
lowercase : Union[str, Any] =32
lowercase : Union[str, Any] =2
lowercase : Union[str, Any] =4
lowercase : List[str] =37
lowercase : str ='''gelu'''
lowercase : Dict =0.1
lowercase : List[Any] =0.1
lowercase : List[str] =512
lowercase : Optional[int] =16
lowercase : Optional[Any] =2
lowercase : List[str] =0.0_2
lowercase : Any =3
lowercase : Optional[Any] =4
lowercase : int =None
def A__ ( self : List[str] ) -> Dict:
'''simple docstring'''
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Any =None
if self.use_input_mask:
lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Union[str, Any] =None
lowercase : Any =None
lowercase : str =None
if self.use_labels:
lowercase : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : List[Any] =ids_tensor([self.batch_size] , self.num_choices )
lowercase : Dict =DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : Any , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =TFDistilBertModel(config=UpperCAmelCase )
lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : List[str] =model(UpperCAmelCase )
lowercase : str =[input_ids, input_mask]
lowercase : Tuple =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowercase : List[str] =TFDistilBertForMaskedLM(config=UpperCAmelCase )
lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : Union[str, Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
lowercase : str =TFDistilBertForQuestionAnswering(config=UpperCAmelCase )
lowercase : int ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self : str , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict ) -> Optional[int]:
'''simple docstring'''
lowercase : Dict =self.num_labels
lowercase : Optional[Any] =TFDistilBertForSequenceClassification(UpperCAmelCase )
lowercase : str ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : Union[str, Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : int , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Dict ) -> List[str]:
'''simple docstring'''
lowercase : List[Any] =self.num_choices
lowercase : Optional[int] =TFDistilBertForMultipleChoice(UpperCAmelCase )
lowercase : Optional[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : Tuple ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
lowercase : Tuple =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
lowercase : Dict =self.num_labels
lowercase : Tuple =TFDistilBertForTokenClassification(UpperCAmelCase )
lowercase : Optional[Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : str =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : List[str] ) -> Dict:
'''simple docstring'''
lowercase : int =self.prepare_config_and_inputs()
((lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase)) : Union[str, Any] =config_and_inputs
lowercase : Tuple ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
UpperCamelCase_ = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
def A__ ( self : Dict ) -> str:
'''simple docstring'''
lowercase : str =TFDistilBertModelTester(self )
lowercase : int =ConfigTester(self , config_class=UpperCAmelCase , dim=37 )
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase )
def A__ ( self : Tuple ) -> Any:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase )
def A__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase )
def A__ ( self : Any ) -> str:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase )
def A__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase )
def A__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase )
@slow
def A__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
lowercase : Union[str, Any] =TFDistilBertModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
lowercase : Optional[Any] =TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
lowercase : Tuple =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase : List[Any] =model(UpperCAmelCase )[0]
lowercase : str =[1, 6, 768]
self.assertEqual(output.shape , UpperCAmelCase )
lowercase : Optional[int] =tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
| 8 | 1 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCAmelCase( snake_case_ ):
"""simple docstring"""
def __a ( self , lowerCamelCase ) -> str:
"""simple docstring"""
with open(lowerCamelCase , encoding="utf-8" ) as input_file:
lowercase__ : Optional[int] = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
lowercase__ : Union[str, Any] = input_file.read()
lowercase__ : str = regexp.search(lowerCamelCase )
return match
def __a ( self , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
with open(lowerCamelCase , encoding="utf-8" ) as input_file:
lowercase__ : str = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
lowercase__ : List[str] = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
lowercase__ : Optional[int] = regexp.finditer(lowerCamelCase )
lowercase__ : Any = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def __a ( self ) -> str:
"""simple docstring"""
lowercase__ : int = Path("./datasets" )
lowercase__ : str = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(lowerCamelCase ) ):
raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )
def __a ( self ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Any = Path("./datasets" )
lowercase__ : List[Any] = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(lowerCamelCase ) ):
raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" ) | 397 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class UpperCAmelCase( snake_case_ ):
"""simple docstring"""
a : "DiagonalGaussianDistribution"
class UpperCAmelCase( snake_case_ , snake_case_ ):
"""simple docstring"""
a : str = True
@register_to_config
def __init__( self , lowerCamelCase = 3 , lowerCamelCase = 3 , lowerCamelCase = ("DownEncoderBlock2D",) , lowerCamelCase = ("UpDecoderBlock2D",) , lowerCamelCase = (64,) , lowerCamelCase = 1 , lowerCamelCase = "silu" , lowerCamelCase = 4 , lowerCamelCase = 32 , lowerCamelCase = 32 , lowerCamelCase = 0.1_82_15 , ) -> Tuple:
"""simple docstring"""
super().__init__()
# pass init params to Encoder
lowercase__ : List[Any] = Encoder(
in_channels=lowerCamelCase , out_channels=lowerCamelCase , down_block_types=lowerCamelCase , block_out_channels=lowerCamelCase , layers_per_block=lowerCamelCase , act_fn=lowerCamelCase , norm_num_groups=lowerCamelCase , double_z=lowerCamelCase , )
# pass init params to Decoder
lowercase__ : Union[str, Any] = Decoder(
in_channels=lowerCamelCase , out_channels=lowerCamelCase , up_block_types=lowerCamelCase , block_out_channels=lowerCamelCase , layers_per_block=lowerCamelCase , norm_num_groups=lowerCamelCase , act_fn=lowerCamelCase , )
lowercase__ : Optional[Any] = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
lowercase__ : Optional[int] = nn.Convad(lowerCamelCase , lowerCamelCase , 1 )
lowercase__ : List[Any] = False
lowercase__ : str = False
# only relevant if vae tiling is enabled
lowercase__ : int = self.config.sample_size
lowercase__ : Dict = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
lowercase__ : Any = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
lowercase__ : str = 0.25
def __a ( self , lowerCamelCase , lowerCamelCase=False ) -> Optional[Any]:
"""simple docstring"""
if isinstance(lowerCamelCase , (Encoder, Decoder) ):
lowercase__ : int = value
def __a ( self , lowerCamelCase = True ) -> List[str]:
"""simple docstring"""
lowercase__ : Any = use_tiling
def __a ( self ) -> Any:
"""simple docstring"""
self.enable_tiling(lowerCamelCase )
def __a ( self ) -> str:
"""simple docstring"""
lowercase__ : str = True
def __a ( self ) -> List[Any]:
"""simple docstring"""
lowercase__ : Dict = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __a ( self ) -> Dict[str, AttentionProcessor]:
"""simple docstring"""
lowercase__ : Any = {}
def fn_recursive_add_processors(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if hasattr(lowerCamelCase , "set_processor" ):
lowercase__ : Dict = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , lowerCamelCase , lowerCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return processors
def __a ( self , lowerCamelCase ) -> Tuple:
"""simple docstring"""
lowercase__ : List[str] = len(self.attn_processors.keys() )
if isinstance(lowerCamelCase , lowerCamelCase ) and len(lowerCamelCase ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(lowerCamelCase )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if hasattr(lowerCamelCase , "set_processor" ):
if not isinstance(lowerCamelCase , lowerCamelCase ):
module.set_processor(lowerCamelCase )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , lowerCamelCase , lowerCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def __a ( self ) -> Union[str, Any]:
"""simple docstring"""
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __a ( self , lowerCamelCase , lowerCamelCase = True ) -> AutoencoderKLOutput:
"""simple docstring"""
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(lowerCamelCase , return_dict=lowerCamelCase )
if self.use_slicing and x.shape[0] > 1:
lowercase__ : int = [self.encoder(lowerCamelCase ) for x_slice in x.split(1 )]
lowercase__ : Any = torch.cat(lowerCamelCase )
else:
lowercase__ : Optional[int] = self.encoder(lowerCamelCase )
lowercase__ : Optional[Any] = self.quant_conv(lowerCamelCase )
lowercase__ : str = DiagonalGaussianDistribution(lowerCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowerCamelCase )
def __a ( self , lowerCamelCase , lowerCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(lowerCamelCase , return_dict=lowerCamelCase )
lowercase__ : Tuple = self.post_quant_conv(lowerCamelCase )
lowercase__ : List[Any] = self.decoder(lowerCamelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase )
@apply_forward_hook
def __a ( self , lowerCamelCase , lowerCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if self.use_slicing and z.shape[0] > 1:
lowercase__ : Optional[Any] = [self._decode(lowerCamelCase ).sample for z_slice in z.split(1 )]
lowercase__ : Dict = torch.cat(lowerCamelCase )
else:
lowercase__ : Dict = self._decode(lowerCamelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=lowerCamelCase )
def __a ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[str]:
"""simple docstring"""
lowercase__ : Optional[Any] = min(a.shape[2] , b.shape[2] , lowerCamelCase )
for y in range(lowerCamelCase ):
lowercase__ : Union[str, Any] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def __a ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[str]:
"""simple docstring"""
lowercase__ : str = min(a.shape[3] , b.shape[3] , lowerCamelCase )
for x in range(lowerCamelCase ):
lowercase__ : List[Any] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def __a ( self , lowerCamelCase , lowerCamelCase = True ) -> AutoencoderKLOutput:
"""simple docstring"""
lowercase__ : List[str] = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
lowercase__ : Optional[int] = int(self.tile_latent_min_size * self.tile_overlap_factor )
lowercase__ : Dict = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
lowercase__ : Optional[int] = []
for i in range(0 , x.shape[2] , lowerCamelCase ):
lowercase__ : int = []
for j in range(0 , x.shape[3] , lowerCamelCase ):
lowercase__ : Optional[Any] = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
lowercase__ : Any = self.encoder(lowerCamelCase )
lowercase__ : Optional[int] = self.quant_conv(lowerCamelCase )
row.append(lowerCamelCase )
rows.append(lowerCamelCase )
lowercase__ : List[str] = []
for i, row in enumerate(lowerCamelCase ):
lowercase__ : Optional[int] = []
for j, tile in enumerate(lowerCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
lowercase__ : Dict = self.blend_v(rows[i - 1][j] , lowerCamelCase , lowerCamelCase )
if j > 0:
lowercase__ : Any = self.blend_h(row[j - 1] , lowerCamelCase , lowerCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowerCamelCase , dim=3 ) )
lowercase__ : Dict = torch.cat(lowerCamelCase , dim=2 )
lowercase__ : List[str] = DiagonalGaussianDistribution(lowerCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowerCamelCase )
def __a ( self , lowerCamelCase , lowerCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
lowercase__ : Union[str, Any] = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
lowercase__ : List[str] = int(self.tile_sample_min_size * self.tile_overlap_factor )
lowercase__ : Union[str, Any] = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
lowercase__ : List[Any] = []
for i in range(0 , z.shape[2] , lowerCamelCase ):
lowercase__ : Dict = []
for j in range(0 , z.shape[3] , lowerCamelCase ):
lowercase__ : Optional[int] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
lowercase__ : int = self.post_quant_conv(lowerCamelCase )
lowercase__ : Optional[Any] = self.decoder(lowerCamelCase )
row.append(lowerCamelCase )
rows.append(lowerCamelCase )
lowercase__ : List[str] = []
for i, row in enumerate(lowerCamelCase ):
lowercase__ : str = []
for j, tile in enumerate(lowerCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
lowercase__ : Tuple = self.blend_v(rows[i - 1][j] , lowerCamelCase , lowerCamelCase )
if j > 0:
lowercase__ : Optional[int] = self.blend_h(row[j - 1] , lowerCamelCase , lowerCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowerCamelCase , dim=3 ) )
lowercase__ : str = torch.cat(lowerCamelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase )
def __a ( self , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = True , lowerCamelCase = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
lowercase__ : Optional[int] = sample
lowercase__ : List[Any] = self.encode(lowerCamelCase ).latent_dist
if sample_posterior:
lowercase__ : Union[str, Any] = posterior.sample(generator=lowerCamelCase )
else:
lowercase__ : int = posterior.mode()
lowercase__ : Tuple = self.decode(lowerCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase ) | 397 | 1 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
__a: Any = logging.getLogger()
def __UpperCamelCase ( ):
lowercase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('''-f''' )
lowercase__ : Dict = parser.parse_args()
return args.f
def __UpperCamelCase ( UpperCAmelCase ):
lowercase__ : Optional[Any] = {}
lowercase__ : Dict = os.path.join(UpperCAmelCase , '''all_results.json''' )
if os.path.exists(UpperCAmelCase ):
with open(UpperCAmelCase , '''r''' ) as f:
lowercase__ : Optional[int] = json.load(UpperCAmelCase )
else:
raise ValueError(F"""can't find {path}""" )
return results
def __UpperCamelCase ( ):
lowercase__ : Any = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
__a: List[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase ( a__ ):
'''simple docstring'''
@classmethod
def _lowerCAmelCase( cls ) -> int:
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
lowercase__ : Tuple = tempfile.mkdtemp()
lowercase__ : Optional[Any] = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
lowercase__ : int = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def _lowerCAmelCase( cls ) -> Optional[Any]:
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _lowerCAmelCase( self ) -> Dict:
lowercase__ : Union[str, Any] = self.get_auto_remove_tmp_dir()
lowercase__ : int = F"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowercase__ : Tuple = get_results(__lowerCAmelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _lowerCAmelCase( self ) -> List[Any]:
lowercase__ : int = self.get_auto_remove_tmp_dir()
lowercase__ : List[str] = F"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
lowercase__ : str = get_results(__lowerCAmelCase )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _lowerCAmelCase( self ) -> Union[str, Any]:
lowercase__ : Optional[int] = self.get_auto_remove_tmp_dir()
lowercase__ : Tuple = F"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
lowercase__ : Union[str, Any] = get_results(__lowerCAmelCase )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _lowerCAmelCase( self ) -> Any:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
lowercase__ : List[Any] = 7 if get_gpu_count() > 1 else 2
lowercase__ : Dict = self.get_auto_remove_tmp_dir()
lowercase__ : int = F"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
lowercase__ : str = get_results(__lowerCAmelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _lowerCAmelCase( self ) -> List[Any]:
lowercase__ : Dict = self.get_auto_remove_tmp_dir()
lowercase__ : List[Any] = F"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
lowercase__ : Dict = get_results(__lowerCAmelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _lowerCAmelCase( self ) -> Tuple:
lowercase__ : Dict = self.get_auto_remove_tmp_dir()
lowercase__ : List[Any] = F"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
lowercase__ : Union[str, Any] = get_results(__lowerCAmelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _lowerCAmelCase( self ) -> Optional[Any]:
lowercase__ : Optional[Any] = self.get_auto_remove_tmp_dir()
lowercase__ : List[Any] = F"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
lowercase__ : str = get_results(__lowerCAmelCase )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _lowerCAmelCase( self ) -> List[Any]:
lowercase__ : int = self.get_auto_remove_tmp_dir()
lowercase__ : Tuple = F"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
lowercase__ : Optional[int] = get_results(__lowerCAmelCase )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , '''translation_no_trainer''' ) ) )
@slow
def _lowerCAmelCase( self ) -> List[str]:
lowercase__ : Union[str, Any] = logging.StreamHandler(sys.stdout )
logger.addHandler(__lowerCAmelCase )
lowercase__ : Union[str, Any] = self.get_auto_remove_tmp_dir()
lowercase__ : Optional[Any] = F"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
lowercase__ : Tuple = get_results(__lowerCAmelCase )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.1_0 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _lowerCAmelCase( self ) -> Any:
lowercase__ : int = self.get_auto_remove_tmp_dir()
lowercase__ : Union[str, Any] = F"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowercase__ : List[str] = get_results(__lowerCAmelCase )
# The base model scores a 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , '''image_classification_no_trainer''' ) ) )
| 716 | '''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__a: List[Any] = logging.getLogger(__name__)
class UpperCAmelCase ( a__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = "sequence-classification"
def __init__( self , __lowerCAmelCase ) -> Optional[Any]:
if type(__lowerCAmelCase ) == dict:
lowercase__ : str = Namespace(**__lowerCAmelCase )
lowercase__ : str = glue_output_modes[hparams.task]
lowercase__ : Dict = glue_tasks_num_labels[hparams.task]
super().__init__(__lowerCAmelCase , __lowerCAmelCase , self.mode )
def _lowerCAmelCase( self , **__lowerCAmelCase ) -> List[str]:
return self.model(**__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase ) -> int:
lowercase__ : Any = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowercase__ : str = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
lowercase__ : int = self(**__lowerCAmelCase )
lowercase__ : Optional[Any] = outputs[0]
lowercase__ : List[str] = self.trainer.lr_schedulers[0]['''scheduler''']
lowercase__ : str = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _lowerCAmelCase( self ) -> Union[str, Any]:
lowercase__ : int = self.hparams
lowercase__ : Tuple = processors[args.task]()
lowercase__ : List[str] = processor.get_labels()
for mode in ["train", "dev"]:
lowercase__ : Union[str, Any] = self._feature_file(__lowerCAmelCase )
if os.path.exists(__lowerCAmelCase ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , __lowerCAmelCase )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
lowercase__ : Union[str, Any] = (
processor.get_dev_examples(args.data_dir )
if mode == '''dev'''
else processor.get_train_examples(args.data_dir )
)
lowercase__ : List[Any] = convert_examples_to_features(
__lowerCAmelCase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('''Saving features into cached file %s''' , __lowerCAmelCase )
torch.save(__lowerCAmelCase , __lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False ) -> DataLoader:
lowercase__ : Dict = '''dev''' if mode == '''test''' else mode
lowercase__ : List[str] = self._feature_file(__lowerCAmelCase )
logger.info('''Loading features from cached file %s''' , __lowerCAmelCase )
lowercase__ : Dict = torch.load(__lowerCAmelCase )
lowercase__ : List[Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowercase__ : List[Any] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
lowercase__ : Dict = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
lowercase__ : str = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
lowercase__ : Tuple = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) , batch_size=__lowerCAmelCase , shuffle=__lowerCAmelCase , )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
lowercase__ : Union[str, Any] = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowercase__ : Union[str, Any] = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
lowercase__ : List[Any] = self(**__lowerCAmelCase )
lowercase__ , lowercase__ : int = outputs[:2]
lowercase__ : List[str] = logits.detach().cpu().numpy()
lowercase__ : Any = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowerCAmelCase( self , __lowerCAmelCase ) -> tuple:
lowercase__ : List[Any] = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
lowercase__ : Dict = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
lowercase__ : Any = np.argmax(__lowerCAmelCase , axis=1 )
elif self.hparams.glue_output_mode == "regression":
lowercase__ : Optional[Any] = np.squeeze(__lowerCAmelCase )
lowercase__ : Optional[Any] = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
lowercase__ : Union[str, Any] = [[] for _ in range(out_label_ids.shape[0] )]
lowercase__ : int = [[] for _ in range(out_label_ids.shape[0] )]
lowercase__ : List[Any] = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , __lowerCAmelCase , __lowerCAmelCase )}
lowercase__ : int = dict(results.items() )
lowercase__ : Optional[int] = results
return ret, preds_list, out_label_list
def _lowerCAmelCase( self , __lowerCAmelCase ) -> dict:
lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(__lowerCAmelCase )
lowercase__ : List[Any] = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowerCAmelCase( self , __lowerCAmelCase ) -> dict:
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = self._eval_end(__lowerCAmelCase )
lowercase__ : Union[str, Any] = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowerCAmelCase( __lowerCAmelCase , __lowerCAmelCase ) -> str:
BaseTransformer.add_model_specific_args(__lowerCAmelCase , __lowerCAmelCase )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=__lowerCAmelCase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--task''' , default='''''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''The GLUE task to run''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=__lowerCAmelCase , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
def __UpperCamelCase ( ):
lowercase__ : int = argparse.ArgumentParser()
add_generic_args(UpperCAmelCase , os.getcwd() )
lowercase__ : List[Any] = GLUETransformer.add_model_specific_args(UpperCAmelCase , os.getcwd() )
lowercase__ : Tuple = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
lowercase__ : List[Any] = os.path.join(
'''./results''' , F"""{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}""" , )
os.makedirs(args.output_dir )
lowercase__ : Optional[int] = GLUETransformer(UpperCAmelCase )
lowercase__ : Any = generic_train(UpperCAmelCase , UpperCAmelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
lowercase__ : List[str] = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=UpperCAmelCase ) )
lowercase__ : Tuple = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(UpperCAmelCase )
if __name__ == "__main__":
main()
| 428 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 678 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
def __lowerCAmelCase ( UpperCamelCase ) -> List[str]:
lowerCAmelCase__ : int = R'''\w+[.]\d+'''
lowerCAmelCase__ : Tuple = re.findall(UpperCamelCase , UpperCamelCase )
for pat in pats:
lowerCAmelCase__ : List[str] = key.replace(UpperCamelCase , '''_'''.join(pat.split('''.''' ) ) )
return key
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
lowerCAmelCase__ : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',)
if (
any('''norm''' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowerCAmelCase__ : List[str] = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''embedding''',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
lowerCAmelCase__ : Dict = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCAmelCase__ : List[Any] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight":
lowerCAmelCase__ : str = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCAmelCase__ : Any = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCAmelCase__ : List[Any] = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=42 ) -> Any:
# Step 1: Convert pytorch tensor to numpy
lowerCAmelCase__ : Optional[Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowerCAmelCase__ : Tuple = flax_model.init_weights(PRNGKey(UpperCamelCase ) )
lowerCAmelCase__ : Any = flatten_dict(UpperCamelCase )
lowerCAmelCase__ : List[Any] = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCAmelCase__ : str = rename_key(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = tuple(renamed_pt_key.split('''.''' ) )
# Correctly rename weight parameters
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = rename_key_and_reshape_tensor(UpperCamelCase , UpperCamelCase , UpperCamelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
lowerCAmelCase__ : List[str] = jnp.asarray(UpperCamelCase )
return unflatten_dict(UpperCamelCase )
| 678 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__:Any = {
"""configuration_lxmert""": ["""LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LxmertConfig"""],
"""tokenization_lxmert""": ["""LxmertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:str = ["""LxmertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Optional[Any] = [
"""LxmertEncoder""",
"""LxmertForPreTraining""",
"""LxmertForQuestionAnswering""",
"""LxmertModel""",
"""LxmertPreTrainedModel""",
"""LxmertVisualFeatureEncoder""",
"""LxmertXLayer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:List[Any] = [
"""TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLxmertForPreTraining""",
"""TFLxmertMainLayer""",
"""TFLxmertModel""",
"""TFLxmertPreTrainedModel""",
"""TFLxmertVisualFeatureEncoder""",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
SCREAMING_SNAKE_CASE__:Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 710 | """simple docstring"""
import heapq
import sys
import numpy as np
SCREAMING_SNAKE_CASE__:Optional[int] = tuple[int, int]
class snake_case__ :
def __init__( self ):
__a = []
__a = set()
def a__ ( self ):
if not self.empty():
return self.elements[0][0]
else:
return float("inf" )
def a__ ( self ):
return len(self.elements ) == 0
def a__ ( self , lowerCamelCase , lowerCamelCase ):
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(lowerCamelCase )
else:
# update
# print("update", item)
__a = []
((__a) , (__a)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__a) , (__a)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def a__ ( self , lowerCamelCase ):
if item in self.set:
self.set.remove(lowerCamelCase )
__a = []
((__a) , (__a)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__a) , (__a)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def a__ ( self ):
return self.elements[0][1]
def a__ ( self ):
((__a) , (__a)) = heapq.heappop(self.elements )
self.set.remove(lowerCamelCase )
return (priority, item)
def _lowerCamelCase( a , a ):
# euclidean distance
__a = np.array(a )
__a = np.array(a )
return np.linalg.norm(a - b )
def _lowerCamelCase( a , a ):
# integer division by time variable
return consistent_heuristic(a , a ) // t
def _lowerCamelCase( a , a ):
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def _lowerCamelCase( a , a , a , a ):
__a = g_function[start] + Wa * heuristics[i](a , a )
return ans
def _lowerCamelCase( a , a , a ):
__a = np.chararray((n, n) )
for i in range(a ):
for j in range(a ):
__a = "*"
for i in range(a ):
for j in range(a ):
if (j, (n - 1) - i) in blocks:
__a = "#"
__a = "-"
__a = back_pointer[goal]
while x != start:
((__a) , (__a)) = x
# print(x)
__a = "-"
__a = back_pointer[x]
__a = "-"
for i in range(a ):
for j in range(a ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=" " )
print("<-- End position" , end=" " )
else:
print(grid[i][j] , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
print("PATH TAKEN BY THE ALGORITHM IS:-" )
__a = back_pointer[goal]
while x != start:
print(a , end=" " )
__a = back_pointer[x]
print(a )
sys.exit()
def _lowerCamelCase( a ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def _lowerCamelCase( a , a , a , a , a , a , a , a , ):
for itera in range(a ):
open_list[itera].remove_element(a )
# print("s", s)
# print("j", j)
((__a) , (__a)) = s
__a = (x - 1, y)
__a = (x + 1, y)
__a = (x, y + 1)
__a = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(a ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(a )
__a = -1
__a = float("inf" )
if valid(a ) and g_function[neighbours] > g_function[s] + 1:
__a = g_function[s] + 1
__a = s
if neighbours not in close_list_anchor:
open_list[0].put(a , key(a , 0 , a , a ) )
if neighbours not in close_list_inad:
for var in range(1 , a ):
if key(a , a , a , a ) <= Wa * key(
a , 0 , a , a ):
open_list[j].put(
a , key(a , a , a , a ) )
def _lowerCamelCase( ):
__a = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
SCREAMING_SNAKE_CASE__:Any = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
SCREAMING_SNAKE_CASE__:str = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
SCREAMING_SNAKE_CASE__:int = make_common_ground()
SCREAMING_SNAKE_CASE__:List[str] = blocks_blk
# hyper parameters
SCREAMING_SNAKE_CASE__:str = 1
SCREAMING_SNAKE_CASE__:Union[str, Any] = 1
SCREAMING_SNAKE_CASE__:Union[str, Any] = 20
SCREAMING_SNAKE_CASE__:Dict = 3 # one consistent and two other inconsistent
# start and end destination
SCREAMING_SNAKE_CASE__:Dict = (0, 0)
SCREAMING_SNAKE_CASE__:Optional[Any] = (n - 1, n - 1)
SCREAMING_SNAKE_CASE__:List[str] = 1
def _lowerCamelCase( a , a , a ):
__a = {start: 0, goal: float("inf" )}
__a = {start: -1, goal: -1}
__a = []
__a = set()
for i in range(a ):
open_list.append(PriorityQueue() )
open_list[i].put(a , key(a , a , a , a ) )
__a = []
__a = []
while open_list[0].minkey() < float("inf" ):
for i in range(1 , a ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("inf" ):
do_something(a , a , a )
else:
__a , __a = open_list[i].top_show()
visited.add(a )
expand_state(
a , a , a , a , a , a , a , a , )
close_list_inad.append(a )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("inf" ):
do_something(a , a , a )
else:
__a = open_list[0].top_show()
visited.add(a )
expand_state(
a , 0 , a , a , a , a , a , a , )
close_list_anchor.append(a )
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(a ):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 67 | 0 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: str = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=_lowerCamelCase , dtype=jnp.bfloataa )
UpperCamelCase_ ,UpperCamelCase_: List[str] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=_lowerCamelCase , from_pt=_lowerCamelCase , dtype=jnp.bfloataa )
UpperCamelCase_: List[str] = controlnet_params
UpperCamelCase_: Any = 'bird'
UpperCamelCase_: Optional[int] = jax.device_count()
UpperCamelCase_: str = pipe.prepare_text_inputs([prompts] * num_samples )
UpperCamelCase_: List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
UpperCamelCase_: Union[str, Any] = pipe.prepare_image_inputs([canny_image] * num_samples )
UpperCamelCase_: Tuple = jax.random.PRNGKey(0 )
UpperCamelCase_: Optional[Any] = jax.random.split(_lowerCamelCase , jax.device_count() )
UpperCamelCase_: Optional[Any] = replicate(_lowerCamelCase )
UpperCamelCase_: List[Any] = shard(_lowerCamelCase )
UpperCamelCase_: List[Any] = shard(_lowerCamelCase )
UpperCamelCase_: Tuple = pipe(
prompt_ids=_lowerCamelCase , image=_lowerCamelCase , params=_lowerCamelCase , prng_seed=_lowerCamelCase , num_inference_steps=5_0 , jit=_lowerCamelCase , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
UpperCamelCase_: Dict = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCamelCase_: str = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
UpperCamelCase_: int = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCamelCase_: Any = jnp.array(
[0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: int = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=_lowerCamelCase , dtype=jnp.bfloataa )
UpperCamelCase_ ,UpperCamelCase_: List[str] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=_lowerCamelCase , from_pt=_lowerCamelCase , dtype=jnp.bfloataa )
UpperCamelCase_: str = controlnet_params
UpperCamelCase_: Any = 'Chef in the kitchen'
UpperCamelCase_: Any = jax.device_count()
UpperCamelCase_: List[str] = pipe.prepare_text_inputs([prompts] * num_samples )
UpperCamelCase_: List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
UpperCamelCase_: Any = pipe.prepare_image_inputs([pose_image] * num_samples )
UpperCamelCase_: Optional[Any] = jax.random.PRNGKey(0 )
UpperCamelCase_: List[str] = jax.random.split(_lowerCamelCase , jax.device_count() )
UpperCamelCase_: Union[str, Any] = replicate(_lowerCamelCase )
UpperCamelCase_: List[str] = shard(_lowerCamelCase )
UpperCamelCase_: List[str] = shard(_lowerCamelCase )
UpperCamelCase_: List[Any] = pipe(
prompt_ids=_lowerCamelCase , image=_lowerCamelCase , params=_lowerCamelCase , prng_seed=_lowerCamelCase , num_inference_steps=5_0 , jit=_lowerCamelCase , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
UpperCamelCase_: Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCamelCase_: Optional[int] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
UpperCamelCase_: str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCamelCase_: Any = jnp.array(
[[0.2_7_1_4_8_4, 0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 | 57 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase : Dict = logging.get_logger(__name__)
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[int] = b.T
SCREAMING_SNAKE_CASE_: Dict = np.sum(np.square(_UpperCAmelCase ) , axis=1 )
SCREAMING_SNAKE_CASE_: Tuple = np.sum(np.square(_UpperCAmelCase ) , axis=0 )
SCREAMING_SNAKE_CASE_: List[Any] = np.matmul(_UpperCAmelCase , _UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Dict = aa[:, None] - 2 * ab + ba[None, :]
return d
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: int = x.reshape(-1 , 3 )
SCREAMING_SNAKE_CASE_: Tuple = squared_euclidean_distance(_UpperCAmelCase , _UpperCAmelCase )
return np.argmin(_UpperCAmelCase , axis=1 )
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : int = ['''pixel_values''']
def __init__( self : Tuple , lowerCAmelCase__ : Optional[Union[List[List[int]], np.ndarray]] = None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : bool = True , **lowerCAmelCase__ : List[str] , ):
super().__init__(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = size if size is not None else {"height": 256, "width": 256}
SCREAMING_SNAKE_CASE_: Tuple = get_size_dict(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = np.array(lowerCAmelCase__) if clusters is not None else None
SCREAMING_SNAKE_CASE_: Dict = do_resize
SCREAMING_SNAKE_CASE_: str = size
SCREAMING_SNAKE_CASE_: List[Any] = resample
SCREAMING_SNAKE_CASE_: Optional[int] = do_normalize
SCREAMING_SNAKE_CASE_: Dict = do_color_quantize
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[Any] , ):
SCREAMING_SNAKE_CASE_: List[str] = get_size_dict(lowerCAmelCase__)
if "height" not in size or "width" not in size:
raise ValueError(F"Size dictionary must contain both height and width keys. Got {size.keys()}")
return resize(
lowerCAmelCase__ , size=(size["height"], size["width"]) , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , ):
SCREAMING_SNAKE_CASE_: str = rescale(image=lowerCAmelCase__ , scale=1 / 127.5 , data_format=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = image - 1
return image
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : ImageInput , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[Union[List[List[int]], np.ndarray]] = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **lowerCAmelCase__ : Union[str, Any] , ):
SCREAMING_SNAKE_CASE_: Tuple = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_: Optional[int] = size if size is not None else self.size
SCREAMING_SNAKE_CASE_: Dict = get_size_dict(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_: int = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_: List[str] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
SCREAMING_SNAKE_CASE_: Tuple = clusters if clusters is not None else self.clusters
SCREAMING_SNAKE_CASE_: Optional[int] = np.array(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = make_list_of_images(lowerCAmelCase__)
if not valid_images(lowerCAmelCase__):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True.")
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True.")
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_: Union[str, Any] = [to_numpy_array(lowerCAmelCase__) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_: Optional[Any] = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_: str = [self.normalize(image=lowerCAmelCase__) for image in images]
if do_color_quantize:
SCREAMING_SNAKE_CASE_: Any = [to_channel_dimension_format(lowerCAmelCase__ , ChannelDimension.LAST) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
SCREAMING_SNAKE_CASE_: List[Any] = np.array(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = color_quantize(lowerCAmelCase__ , lowerCAmelCase__).reshape(images.shape[:-1])
# flatten to (batch_size, height*width)
SCREAMING_SNAKE_CASE_: str = images.shape[0]
SCREAMING_SNAKE_CASE_: Tuple = images.reshape(lowerCAmelCase__ , -1)
# We need to convert back to a list of images to keep consistent behaviour across processors.
SCREAMING_SNAKE_CASE_: str = list(lowerCAmelCase__)
else:
SCREAMING_SNAKE_CASE_: Dict = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__) for image in images]
SCREAMING_SNAKE_CASE_: Optional[Any] = {"input_ids": images}
return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__)
| 671 | 0 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : Optional[int] = IFImgaImgSuperResolutionPipeline
__UpperCamelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
__UpperCamelCase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
__UpperCamelCase : Any = PipelineTesterMixin.required_optional_params - {"latents"}
def _lowercase ( self ):
"""simple docstring"""
return self._get_superresolution_dummy_components()
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
"""simple docstring"""
if str(A_ ).startswith('''mps''' ):
UpperCamelCase : List[str] = torch.manual_seed(A_ )
else:
UpperCamelCase : List[Any] = torch.Generator(device=A_ ).manual_seed(A_ )
UpperCamelCase : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(A_ ) ).to(A_ )
UpperCamelCase : Any = floats_tensor((1, 3, 16, 16) , rng=random.Random(A_ ) ).to(A_ )
UpperCamelCase : str = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def _lowercase ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _lowercase ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def _lowercase ( self ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _lowercase ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _lowercase ( self ):
"""simple docstring"""
self._test_save_load_local()
def _lowercase ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 703 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : Dict = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Optional[int] = "yolos"
def __init__( self , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3_072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=[512, 864] , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.1 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = hidden_size
UpperCamelCase : List[Any] = num_hidden_layers
UpperCamelCase : int = num_attention_heads
UpperCamelCase : Dict = intermediate_size
UpperCamelCase : Dict = hidden_act
UpperCamelCase : int = hidden_dropout_prob
UpperCamelCase : Any = attention_probs_dropout_prob
UpperCamelCase : Optional[Any] = initializer_range
UpperCamelCase : List[Any] = layer_norm_eps
UpperCamelCase : int = image_size
UpperCamelCase : Any = patch_size
UpperCamelCase : str = num_channels
UpperCamelCase : str = qkv_bias
UpperCamelCase : Tuple = num_detection_tokens
UpperCamelCase : List[Any] = use_mid_position_embeddings
UpperCamelCase : Dict = auxiliary_loss
# Hungarian matcher
UpperCamelCase : Optional[Any] = class_cost
UpperCamelCase : Union[str, Any] = bbox_cost
UpperCamelCase : Any = giou_cost
# Loss coefficients
UpperCamelCase : List[Any] = bbox_loss_coefficient
UpperCamelCase : Union[str, Any] = giou_loss_coefficient
UpperCamelCase : Dict = eos_coefficient
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[str] = version.parse("1.11")
@property
def _lowercase ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _lowercase ( self ):
"""simple docstring"""
return 1e-4
@property
def _lowercase ( self ):
"""simple docstring"""
return 12
| 643 | 0 |
import baseaa
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :str ) -> bytes:
return baseaa.aaaencode(string.encode("""utf-8""" ) )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :bytes ) -> str:
return baseaa.aaadecode(SCREAMING_SNAKE_CASE ).decode("""utf-8""" )
if __name__ == "__main__":
import doctest
doctest.testmod() | 504 |
from math import pow, sqrt
def _SCREAMING_SNAKE_CASE ( *SCREAMING_SNAKE_CASE :float ) -> bool:
__lowerCAmelCase : Union[str, Any] = len(SCREAMING_SNAKE_CASE ) > 0 and all(value > 0.0 for value in values )
return result
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :float , SCREAMING_SNAKE_CASE :float ) -> float | ValueError:
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else ValueError("""Input Error: Molar mass values must greater than 0.""" )
)
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :float , SCREAMING_SNAKE_CASE :float , SCREAMING_SNAKE_CASE :float ) -> float | ValueError:
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else ValueError(
"""Input Error: Molar mass and effusion rate values must greater than 0.""" )
)
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :float , SCREAMING_SNAKE_CASE :float , SCREAMING_SNAKE_CASE :float ) -> float | ValueError:
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else ValueError(
"""Input Error: Molar mass and effusion rate values must greater than 0.""" )
)
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :float , SCREAMING_SNAKE_CASE :float , SCREAMING_SNAKE_CASE :float ) -> float | ValueError:
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else ValueError(
"""Input Error: Molar mass and effusion rate values must greater than 0.""" )
)
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :float , SCREAMING_SNAKE_CASE :float , SCREAMING_SNAKE_CASE :float ) -> float | ValueError:
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else ValueError(
"""Input Error: Molar mass and effusion rate values must greater than 0.""" )
) | 504 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase_ = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 264 | import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_337 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_337 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def lowerCAmelCase_ ( lowercase: SplitDict ) -> List[str]:
'''simple docstring'''
_UpperCamelCase: List[str] = split_dict._to_yaml_list()
assert len(lowercase ) == len(lowercase )
_UpperCamelCase: Tuple = SplitDict._from_yaml_list(lowercase )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
_UpperCamelCase: Optional[Any] = None
# the split name of split_dict takes over the name of the split info object
_UpperCamelCase: Any = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'''split_info''' , [SplitInfo(), SplitInfo(dataset_name=lowercase ), SplitInfo(dataset_name='''my_dataset''' )] )
def lowerCAmelCase_ ( lowercase: str ) -> Tuple:
'''simple docstring'''
# For backward compatibility, we need asdict(split_dict) to return split info dictrionaries with the "dataset_name"
# field even if it's deprecated. This way old versionso of `datasets` can still reload dataset_infos.json files
_UpperCamelCase: Optional[Any] = asdict(SplitDict({'''train''': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name | 264 | 1 |
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase ( __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = PhobertTokenizer
lowerCAmelCase_ : Optional[int] = False
def A__ ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase_ = ["T@@", "i", "I", "R@@", "r", "e@@"]
UpperCAmelCase_ = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
UpperCAmelCase_ = ["#version: 0.2", "l à</w>"]
UpperCAmelCase_ = {"unk_token": "<unk>"}
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
for token in vocab_tokens:
fp.write(f'''{token} {vocab_tokens[token]}\n''' )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__lowerCamelCase ) )
def A__ ( self , **lowerCAmelCase ):
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = "Tôi là VinAI Research"
UpperCAmelCase_ = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
return input_text, output_text
def A__ ( self ):
UpperCAmelCase_ = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase_ = "Tôi là VinAI Research"
UpperCAmelCase_ = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
UpperCAmelCase_ = tokenizer.tokenize(__lowerCamelCase )
print(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCAmelCase_ = tokens + [tokenizer.unk_token]
UpperCAmelCase_ = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
| 579 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__A : Optional[Any] = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = field(default=__snake_case , metadata={"help": "Whether to use SortishSampler or not."} )
lowerCamelCase__ = field(
default=__snake_case , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowerCamelCase__ = field(
default=__snake_case , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
lowerCamelCase__ = field(
default=__snake_case , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
lowerCamelCase__ = field(
default=__snake_case , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = super().to_dict()
for k, v in d.items():
if isinstance(__lowerCamelCase , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = v.to_dict()
return d | 16 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : List[Any] = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_lowerCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 361 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class lowercase ( __UpperCAmelCase):
__lowerCAmelCase : Optional[Any] = """camembert"""
def __init__( self : List[Any] , _lowerCamelCase : Optional[Any]=3_05_22 , _lowerCamelCase : List[str]=7_68 , _lowerCamelCase : Optional[Any]=12 , _lowerCamelCase : List[str]=12 , _lowerCamelCase : Any=30_72 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : Optional[Any]=0.1 , _lowerCamelCase : Optional[int]=0.1 , _lowerCamelCase : Union[str, Any]=5_12 , _lowerCamelCase : str=2 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : int=1E-12 , _lowerCamelCase : str=1 , _lowerCamelCase : Union[str, Any]=0 , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : List[str]="absolute" , _lowerCamelCase : str=True , _lowerCamelCase : List[Any]=None , **_lowerCamelCase : Union[str, Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
A_ : Any = vocab_size
A_ : Optional[Any] = hidden_size
A_ : Any = num_hidden_layers
A_ : List[Any] = num_attention_heads
A_ : List[Any] = hidden_act
A_ : Optional[int] = intermediate_size
A_ : Tuple = hidden_dropout_prob
A_ : Union[str, Any] = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Tuple = type_vocab_size
A_ : Dict = initializer_range
A_ : Tuple = layer_norm_eps
A_ : List[Any] = position_embedding_type
A_ : Any = use_cache
A_ : Dict = classifier_dropout
class lowercase ( __UpperCAmelCase):
@property
def a_ ( self : Optional[int] ):
"""simple docstring"""
if self.task == "multiple-choice":
A_ : Union[str, Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
A_ : Dict = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 361 | 1 |
from ...configuration_utils import PretrainedConfig
__lowerCamelCase : List[Any] = {
"google/tapas-base-finetuned-sqa": (
"https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
),
"google/tapas-base-finetuned-wtq": (
"https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
),
"google/tapas-base-finetuned-wikisql-supervised": (
"https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
),
"google/tapas-base-finetuned-tabfact": (
"https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
),
}
class a ( UpperCamelCase_ ):
__lowercase = """tapas"""
def __init__( self , __UpperCamelCase=3_05_22 , __UpperCamelCase=7_68 , __UpperCamelCase=12 , __UpperCamelCase=12 , __UpperCamelCase=30_72 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=10_24 , __UpperCamelCase=[3, 2_56, 2_56, 2, 2_56, 2_56, 10] , __UpperCamelCase=0.02 , __UpperCamelCase=1E-12 , __UpperCamelCase=0 , __UpperCamelCase=10.0 , __UpperCamelCase=0 , __UpperCamelCase=1.0 , __UpperCamelCase=None , __UpperCamelCase=1.0 , __UpperCamelCase=False , __UpperCamelCase=None , __UpperCamelCase=1.0 , __UpperCamelCase=1.0 , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase="ratio" , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=64 , __UpperCamelCase=32 , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase , )-> Optional[Any]:
'''simple docstring'''
super().__init__(pad_token_id=__UpperCamelCase , **__UpperCamelCase )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
A__ : List[str] =vocab_size
A__ : Dict =hidden_size
A__ : Optional[int] =num_hidden_layers
A__ : int =num_attention_heads
A__ : Optional[int] =hidden_act
A__ : List[Any] =intermediate_size
A__ : Union[str, Any] =hidden_dropout_prob
A__ : Union[str, Any] =attention_probs_dropout_prob
A__ : Dict =max_position_embeddings
A__ : Union[str, Any] =type_vocab_sizes
A__ : Optional[Any] =initializer_range
A__ : Any =layer_norm_eps
# Fine-tuning task hyperparameters
A__ : int =positive_label_weight
A__ : Optional[Any] =num_aggregation_labels
A__ : Any =aggregation_loss_weight
A__ : Tuple =use_answer_as_supervision
A__ : List[Any] =answer_loss_importance
A__ : List[str] =use_normalized_answer_loss
A__ : List[str] =huber_loss_delta
A__ : int =temperature
A__ : str =aggregation_temperature
A__ : int =use_gumbel_for_cells
A__ : int =use_gumbel_for_aggregation
A__ : List[str] =average_approximation_function
A__ : Dict =cell_selection_preference
A__ : Tuple =answer_loss_cutoff
A__ : Any =max_num_rows
A__ : Optional[int] =max_num_columns
A__ : Optional[int] =average_logits_per_cell
A__ : int =select_one_column
A__ : List[Any] =allow_empty_column_selection
A__ : Any =init_cell_selection_weights_to_zero
A__ : int =reset_position_index_per_cell
A__ : Any =disable_per_token_loss
# Aggregation hyperparameters
A__ : Optional[Any] =aggregation_labels
A__ : Tuple =no_aggregation_label_index
if isinstance(self.aggregation_labels , __UpperCamelCase ):
A__ : int ={int(__UpperCamelCase ): v for k, v in aggregation_labels.items()}
| 416 |
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
A__ : Tuple =1
for i in range(1, num + 1 ):
fact *= i
return fact
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
A__ : Optional[Any] =0
while number > 0:
A__ : List[Any] =number % 1_0
sum_of_digits += last_digit
A__ : str =number // 1_0 # Removing the last_digit from the given number
return sum_of_digits
def SCREAMING_SNAKE_CASE__ ( snake_case_ = 1_0_0 ) -> int:
A__ : List[str] =factorial(snake_case_ )
A__ : str =split_and_add(snake_case_ )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 416 | 1 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_A : List[Any] = threading.Lock()
_A : Optional[logging.Handler] = None
_A : Union[str, Any] = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
_A : Optional[int] = logging.WARNING
_A : Optional[Any] = True
def _a ( ) -> Tuple:
"""simple docstring"""
lowerCamelCase__ : int = os.getenv('''TRANSFORMERS_VERBOSITY''' , UpperCAmelCase )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
f"has to be one of: { ', '.join(log_levels.keys() ) }" )
return _default_log_level
def _a ( ) -> str:
"""simple docstring"""
return __name__.split('''.''' )[0]
def _a ( ) -> logging.Logger:
"""simple docstring"""
return logging.getLogger(_get_library_name() )
def _a ( ) -> None:
"""simple docstring"""
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
lowerCamelCase__ : Tuple = logging.StreamHandler() # Set sys.stderr as stream.
lowerCamelCase__ : Optional[int] = sys.stderr.flush
# Apply our default configuration to the library root logger.
lowerCamelCase__ : Any = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
lowerCamelCase__ : List[str] = False
def _a ( ) -> None:
"""simple docstring"""
global _default_handler
with _lock:
if not _default_handler:
return
lowerCamelCase__ : Union[str, Any] = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
lowerCamelCase__ : Dict = None
def _a ( ) -> Tuple:
"""simple docstring"""
return log_levels
def _a ( UpperCAmelCase = None ) -> logging.Logger:
"""simple docstring"""
if name is None:
lowerCamelCase__ : List[Any] = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(UpperCAmelCase )
def _a ( ) -> int:
"""simple docstring"""
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def _a ( UpperCAmelCase ) -> None:
"""simple docstring"""
_configure_library_root_logger()
_get_library_root_logger().setLevel(UpperCAmelCase )
def _a ( ) -> Optional[Any]:
"""simple docstring"""
return set_verbosity(UpperCAmelCase )
def _a ( ) -> Optional[Any]:
"""simple docstring"""
return set_verbosity(UpperCAmelCase )
def _a ( ) -> Tuple:
"""simple docstring"""
return set_verbosity(UpperCAmelCase )
def _a ( ) -> Any:
"""simple docstring"""
return set_verbosity(UpperCAmelCase )
def _a ( ) -> None:
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def _a ( ) -> None:
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def _a ( UpperCAmelCase ) -> None:
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(UpperCAmelCase )
def _a ( UpperCAmelCase ) -> None:
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(UpperCAmelCase )
def _a ( ) -> None:
"""simple docstring"""
_configure_library_root_logger()
lowerCamelCase__ : List[Any] = False
def _a ( ) -> None:
"""simple docstring"""
_configure_library_root_logger()
lowerCamelCase__ : Union[str, Any] = True
def _a ( ) -> None:
"""simple docstring"""
lowerCamelCase__ : str = _get_library_root_logger().handlers
for handler in handlers:
lowerCamelCase__ : Any = logging.Formatter('''[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s''' )
handler.setFormatter(UpperCAmelCase )
def _a ( ) -> None:
"""simple docstring"""
lowerCamelCase__ : int = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(UpperCAmelCase )
def _a ( self , *UpperCAmelCase , **UpperCAmelCase ) -> Tuple:
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = os.getenv('''TRANSFORMERS_NO_ADVISORY_WARNINGS''' , UpperCAmelCase )
if no_advisory_warnings:
return
self.warning(*UpperCAmelCase , **UpperCAmelCase )
_A : Any = warning_advice
@functools.lru_cache(UpperCAmelCase )
def _a ( self , *UpperCAmelCase , **UpperCAmelCase ) -> Dict:
"""simple docstring"""
self.warning(*UpperCAmelCase , **UpperCAmelCase )
_A : Dict = warning_once
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , *A : str , **A : Union[str, Any] ) ->Tuple: # pylint: disable=unused-argument
lowerCamelCase__ : int = args[0] if args else None
def __iter__( self : Optional[int] ) ->str:
return iter(self._iterator )
def __getattr__( self : Union[str, Any] , A : str ) ->Dict:
def empty_fn(*A : str , **A : Union[str, Any] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Optional[Any] ) ->str:
return self
def __exit__( self : Union[str, Any] , A : List[str] , A : Optional[int] , A : int ) ->Optional[int]:
return
class __SCREAMING_SNAKE_CASE :
def __call__( self : Any , *A : Any , **A : Union[str, Any] ) ->Dict:
if _tqdm_active:
return tqdm_lib.tqdm(*A , **A )
else:
return EmptyTqdm(*A , **A )
def __lowerCamelCase ( self : List[str] , *A : str , **A : Tuple ) ->Any:
lowerCamelCase__ : Union[str, Any] = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*A , **A )
def __lowerCamelCase ( self : Tuple ) ->str:
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
_A : Tuple = _tqdm_cls()
def _a ( ) -> bool:
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def _a ( ) -> List[str]:
"""simple docstring"""
global _tqdm_active
lowerCamelCase__ : str = True
hf_hub_utils.enable_progress_bars()
def _a ( ) -> int:
"""simple docstring"""
global _tqdm_active
lowerCamelCase__ : int = False
hf_hub_utils.disable_progress_bars()
| 130 |
import numpy as np
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] ) ->int:
lowerCamelCase__ : Optional[Any] = (0, 0)
lowerCamelCase__ : Dict = None
lowerCamelCase__ : Optional[int] = 0
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Union[str, Any] = 0
def __eq__( self : Optional[int] , A : Optional[Any] ) ->List[Any]:
return self.position == cell.position
def __lowerCamelCase ( self : List[str] ) ->int:
print(self.position )
class __SCREAMING_SNAKE_CASE :
def __init__( self : str , A : List[str]=(5, 5) ) ->Optional[int]:
lowerCamelCase__ : int = np.zeros(A )
lowerCamelCase__ : Optional[int] = world_size[0]
lowerCamelCase__ : Optional[int] = world_size[1]
def __lowerCamelCase ( self : List[str] ) ->List[str]:
print(self.w )
def __lowerCamelCase ( self : Union[str, Any] , A : str ) ->Optional[Any]:
lowerCamelCase__ : Any = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
lowerCamelCase__ : List[Any] = cell.position[0]
lowerCamelCase__ : Union[str, Any] = cell.position[1]
lowerCamelCase__ : int = []
for n in neughbour_cord:
lowerCamelCase__ : Tuple = current_x + n[0]
lowerCamelCase__ : Optional[Any] = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
lowerCamelCase__ : List[Any] = Cell()
lowerCamelCase__ : Tuple = (x, y)
lowerCamelCase__ : List[Any] = cell
neighbours.append(A )
return neighbours
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
"""simple docstring"""
lowerCamelCase__ : Dict = []
lowerCamelCase__ : List[str] = []
_open.append(UpperCAmelCase )
while _open:
lowerCamelCase__ : Any = np.argmin([n.f for n in _open] )
lowerCamelCase__ : List[str] = _open[min_f]
_closed.append(_open.pop(UpperCAmelCase ) )
if current == goal:
break
for n in world.get_neigbours(UpperCAmelCase ):
for c in _closed:
if c == n:
continue
lowerCamelCase__ : Any = current.g + 1
lowerCamelCase__ , lowerCamelCase__ : str = n.position
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = goal.position
lowerCamelCase__ : Optional[Any] = (ya - ya) ** 2 + (xa - xa) ** 2
lowerCamelCase__ : List[Any] = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(UpperCAmelCase )
lowerCamelCase__ : List[Any] = []
while current.parent is not None:
path.append(current.position )
lowerCamelCase__ : int = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
_A : Dict = Gridworld()
# Start position and goal
_A : Any = Cell()
_A : int = (0, 0)
_A : Optional[int] = Cell()
_A : Tuple = (4, 4)
print(F'''path from {start.position} to {goal.position}''')
_A : int = astar(world, start, goal)
# Just for visual reasons.
for i in s:
_A : List[Any] = 1
print(world.w)
| 130 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class UpperCAmelCase ( _snake_case ):
UpperCAmelCase = "megatron-bert"
def __init__( self : int , __lowerCamelCase : Any=2_9_0_5_6 , __lowerCamelCase : Tuple=1_0_2_4 , __lowerCamelCase : List[str]=2_4 , __lowerCamelCase : str=1_6 , __lowerCamelCase : str=4_0_9_6 , __lowerCamelCase : Optional[Any]="gelu" , __lowerCamelCase : str=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Optional[Any]=5_1_2 , __lowerCamelCase : Dict=2 , __lowerCamelCase : List[str]=0.02 , __lowerCamelCase : List[Any]=1e-12 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : List[str]="absolute" , __lowerCamelCase : str=True , **__lowerCamelCase : List[str] , ):
super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
UpperCAmelCase__ :List[Any] = vocab_size
UpperCAmelCase__ :str = hidden_size
UpperCAmelCase__ :int = num_hidden_layers
UpperCAmelCase__ :Dict = num_attention_heads
UpperCAmelCase__ :str = hidden_act
UpperCAmelCase__ :Tuple = intermediate_size
UpperCAmelCase__ :Tuple = hidden_dropout_prob
UpperCAmelCase__ :Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ :Tuple = max_position_embeddings
UpperCAmelCase__ :Union[str, Any] = type_vocab_size
UpperCAmelCase__ :str = initializer_range
UpperCAmelCase__ :Tuple = layer_norm_eps
UpperCAmelCase__ :Tuple = position_embedding_type
UpperCAmelCase__ :Optional[int] = use_cache
| 467 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def a__ ( UpperCamelCase_ : str, UpperCamelCase_ : str ):
UpperCAmelCase__ :Any = list(UpperCamelCase_ )
UpperCAmelCase__ :Optional[int] = list(UpperCamelCase_ )
UpperCAmelCase__ :str = 0
for i in range(len(UpperCamelCase_ ) ):
if lista[i] != lista[i]:
count += 1
UpperCAmelCase__ :Union[str, Any] = '''_'''
if count > 1:
return False
else:
return "".join(UpperCamelCase_ )
def a__ ( UpperCamelCase_ : list[str] ):
UpperCAmelCase__ :int = []
while True:
UpperCAmelCase__ :Dict = ['''$'''] * len(UpperCamelCase_ )
UpperCAmelCase__ :List[str] = []
for i in range(len(UpperCamelCase_ ) ):
for j in range(i + 1, len(UpperCamelCase_ ) ):
UpperCAmelCase__ :Optional[Any] = compare_string(binary[i], binary[j] )
if k is False:
UpperCAmelCase__ :Any = '''*'''
UpperCAmelCase__ :List[Any] = '''*'''
temp.append('''X''' )
for i in range(len(UpperCamelCase_ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(UpperCamelCase_ ) == 0:
return pi
UpperCAmelCase__ :Tuple = list(set(UpperCamelCase_ ) )
def a__ ( UpperCamelCase_ : int, UpperCamelCase_ : Sequence[float] ):
UpperCAmelCase__ :int = []
for minterm in minterms:
UpperCAmelCase__ :int = ''''''
for _ in range(UpperCamelCase_ ):
UpperCAmelCase__ :Optional[int] = str(minterm % 2 ) + string
minterm //= 2
temp.append(UpperCamelCase_ )
return temp
def a__ ( UpperCamelCase_ : str, UpperCamelCase_ : str, UpperCamelCase_ : int ):
UpperCAmelCase__ :Dict = list(UpperCamelCase_ )
UpperCAmelCase__ :str = list(UpperCamelCase_ )
UpperCAmelCase__ :str = 0
for i in range(len(UpperCamelCase_ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def a__ ( UpperCamelCase_ : list[list[int]], UpperCamelCase_ : list[str] ):
UpperCAmelCase__ :Optional[Any] = []
UpperCAmelCase__ :List[Any] = [0] * len(UpperCamelCase_ )
for i in range(len(chart[0] ) ):
UpperCAmelCase__ :Optional[Any] = 0
UpperCAmelCase__ :Union[str, Any] = -1
for j in range(len(UpperCamelCase_ ) ):
if chart[j][i] == 1:
count += 1
UpperCAmelCase__ :Any = j
if count == 1:
UpperCAmelCase__ :Any = 1
for i in range(len(UpperCamelCase_ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(UpperCamelCase_ ) ):
UpperCAmelCase__ :int = 0
temp.append(prime_implicants[i] )
while True:
UpperCAmelCase__ :Optional[int] = 0
UpperCAmelCase__ :Dict = -1
UpperCAmelCase__ :Optional[Any] = 0
for i in range(len(UpperCamelCase_ ) ):
UpperCAmelCase__ :str = chart[i].count(1 )
if count_n > max_n:
UpperCAmelCase__ :Any = count_n
UpperCAmelCase__ :List[Any] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(UpperCamelCase_ ) ):
UpperCAmelCase__ :Optional[int] = 0
def a__ ( UpperCamelCase_ : list[str], UpperCamelCase_ : list[str] ):
UpperCAmelCase__ :List[str] = [[0 for x in range(len(UpperCamelCase_ ) )] for x in range(len(UpperCamelCase_ ) )]
for i in range(len(UpperCamelCase_ ) ):
UpperCAmelCase__ :Tuple = prime_implicants[i].count('''_''' )
for j in range(len(UpperCamelCase_ ) ):
if is_for_table(prime_implicants[i], binary[j], UpperCamelCase_ ):
UpperCAmelCase__ :List[str] = 1
return chart
def a__ ( ):
UpperCAmelCase__ :int = int(input('''Enter the no. of variables\n''' ) )
UpperCAmelCase__ :Tuple = [
float(UpperCamelCase_ )
for x in input(
'''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split()
]
UpperCAmelCase__ :Union[str, Any] = decimal_to_binary(UpperCamelCase_, UpperCamelCase_ )
UpperCAmelCase__ :Optional[Any] = check(UpperCamelCase_ )
print('''Prime Implicants are:''' )
print(UpperCamelCase_ )
UpperCAmelCase__ :Optional[int] = prime_implicant_chart(UpperCamelCase_, UpperCamelCase_ )
UpperCAmelCase__ :Dict = selection(UpperCamelCase_, UpperCamelCase_ )
print('''Essential Prime Implicants are:''' )
print(UpperCamelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 467 | 1 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase ( SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Union[str, Any] = ['image_processor', 'tokenizer']
UpperCAmelCase : List[str] = 'BlipImageProcessor'
UpperCAmelCase : int = 'AutoTokenizer'
def __init__( self : Tuple , __snake_case : List[Any] , __snake_case : Union[str, Any] ) -> int:
_a : str = False
super().__init__(__snake_case , __snake_case )
_a : Union[str, Any] = self.image_processor
def __call__( self : Optional[Any] , __snake_case : ImageInput = None , __snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __snake_case : bool = True , __snake_case : Union[bool, str, PaddingStrategy] = False , __snake_case : Union[bool, str, TruncationStrategy] = None , __snake_case : Optional[int] = None , __snake_case : int = 0 , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = True , __snake_case : Optional[Union[str, TensorType]] = None , **__snake_case : int , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
_a : int = self.tokenizer
_a : List[str] = self.tokenizer(
text=__snake_case , add_special_tokens=__snake_case , padding=__snake_case , truncation=__snake_case , max_length=__snake_case , stride=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , return_overflowing_tokens=__snake_case , return_special_tokens_mask=__snake_case , return_offsets_mapping=__snake_case , return_token_type_ids=__snake_case , return_length=__snake_case , verbose=__snake_case , return_tensors=__snake_case , **__snake_case , )
return text_encoding
# add pixel_values
_a : Dict = self.image_processor(__snake_case , return_tensors=__snake_case )
if text is not None:
_a : List[str] = self.tokenizer(
text=__snake_case , add_special_tokens=__snake_case , padding=__snake_case , truncation=__snake_case , max_length=__snake_case , stride=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , return_overflowing_tokens=__snake_case , return_special_tokens_mask=__snake_case , return_offsets_mapping=__snake_case , return_token_type_ids=__snake_case , return_length=__snake_case , verbose=__snake_case , return_tensors=__snake_case , **__snake_case , )
else:
_a : Dict = None
if text_encoding is not None:
encoding_image_processor.update(__snake_case )
return encoding_image_processor
def snake_case_ ( self : Tuple , *__snake_case : Optional[Any] , **__snake_case : Tuple ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def snake_case_ ( self : Dict , *__snake_case : List[Any] , **__snake_case : List[Any] ) -> str:
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def snake_case_ ( self : Optional[int] ) -> int:
_a : Dict = self.tokenizer.model_input_names
_a : Any = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 249 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowerCamelCase_ ( UpperCamelCase_ ):
_a : Optional[Any] = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
_a : List[Any] = True if '''large''' in model_name or '''huge''' in model_name else False
_a : Optional[Any] = True if '''large''' in model_name or '''huge''' in model_name else False
_a : Any = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
_a : Any = [3, 3, 3, 3]
_a : Optional[int] = [5, 5, 5, 5]
elif "fl4" in model_name:
_a : List[str] = [4, 4, 4, 4]
_a : Optional[int] = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
_a : str = [3, 3, 3, 3]
if "lrf" in model_name:
_a : int = [3, 3, 3, 3]
else:
_a : str = [2, 2, 2, 2]
if "tiny" in model_name:
_a : Optional[int] = 96
elif "small" in model_name:
_a : Dict = 96
elif "base" in model_name:
_a : Any = 128
elif "large" in model_name:
_a : int = 192
elif "xlarge" in model_name:
_a : Optional[int] = 256
elif "huge" in model_name:
_a : str = 352
# set label information
_a : Any = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
_a : Optional[int] = '''imagenet-22k-id2label.json'''
else:
_a : Dict = '''imagenet-1k-id2label.json'''
_a : int = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type='''dataset''' ) , '''r''' ) )
_a : int = {int(UpperCamelCase_ ): v for k, v in idalabel.items()}
_a : Optional[int] = {v: k for k, v in idalabel.items()}
_a : Union[str, Any] = FocalNetConfig(
embed_dim=UpperCamelCase_ , depths=UpperCamelCase_ , focal_levels=UpperCamelCase_ , focal_windows=UpperCamelCase_ , use_conv_embed=UpperCamelCase_ , idalabel=UpperCamelCase_ , labelaid=UpperCamelCase_ , use_post_layernorm=UpperCamelCase_ , use_layerscale=UpperCamelCase_ , )
return config
def lowerCamelCase_ ( UpperCamelCase_ ):
if "patch_embed.proj" in name:
_a : Tuple = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
_a : Dict = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
_a : Any = '''encoder.''' + name
if "encoder.layers" in name:
_a : int = name.replace('''encoder.layers''' , '''encoder.stages''' )
if "downsample.proj" in name:
_a : int = name.replace('''downsample.proj''' , '''downsample.projection''' )
if "blocks" in name:
_a : Optional[Any] = name.replace('''blocks''' , '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
_a : Dict = name.replace('''modulation.f''' , '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
_a : Any = name.replace('''modulation.h''' , '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
_a : Tuple = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
if name == "norm.weight":
_a : Optional[int] = '''layernorm.weight'''
if name == "norm.bias":
_a : List[str] = '''layernorm.bias'''
if "head" in name:
_a : List[str] = name.replace('''head''' , '''classifier''' )
else:
_a : str = '''focalnet.''' + name
return name
def lowerCamelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ):
# fmt: off
_a : str = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
_a : List[Any] = model_name_to_url[model_name]
print('''Checkpoint URL: ''' , UpperCamelCase_ )
_a : Optional[int] = torch.hub.load_state_dict_from_url(UpperCamelCase_ , map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
_a : int = state_dict.pop(UpperCamelCase_ )
_a : List[Any] = val
_a : List[str] = get_focalnet_config(UpperCamelCase_ )
_a : Dict = FocalNetForImageClassification(UpperCamelCase_ )
model.eval()
# load state dict
model.load_state_dict(UpperCamelCase_ )
# verify conversion
_a : int = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_a : List[str] = BitImageProcessor(
do_resize=UpperCamelCase_ , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCamelCase_ , crop_size=224 , do_normalize=UpperCamelCase_ , image_mean=UpperCamelCase_ , image_std=UpperCamelCase_ , )
_a : Tuple = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
_a : str = processor(images=UpperCamelCase_ , return_tensors='''pt''' )
_a : Dict = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
_a : int = image_transforms(UpperCamelCase_ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , UpperCamelCase_ , atol=1E-4 )
_a : Optional[Any] = model(**UpperCamelCase_ )
_a : int = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
_a : str = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
_a : Tuple = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
_a : List[Any] = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
_a : Optional[int] = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
_a : List[Any] = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
_a : List[Any] = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase_ )
processor.save_pretrained(UpperCamelCase_ )
if push_to_hub:
print(f"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(f"""{model_name}""" )
processor.push_to_hub(f"""{model_name}""" )
if __name__ == "__main__":
__UpperCAmelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
__UpperCAmelCase : Optional[int] = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 249 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( A , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Any = KandinskyVaaPriorPipeline
__lowerCamelCase : Optional[Any] = ['prompt']
__lowerCamelCase : Tuple = ['prompt', 'negative_prompt']
__lowerCamelCase : Union[str, Any] = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
__lowerCamelCase : int = False
@property
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
return 32
@property
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
return 32
@property
def a__ (self ) -> Optional[int]:
"""simple docstring"""
return self.time_input_dim
@property
def a__ (self ) -> Tuple:
"""simple docstring"""
return self.time_input_dim * 4
@property
def a__ (self ) -> Any:
"""simple docstring"""
return 100
@property
def a__ (self ) -> str:
"""simple docstring"""
_a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def a__ (self ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(A )
@property
def a__ (self ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
_a = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 12,
'''embedding_dim''': self.text_embedder_hidden_size,
'''num_layers''': 1,
}
_a = PriorTransformer(**A )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
_a = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def a__ (self ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
_a = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
_a = CLIPVisionModelWithProjection(A )
return model
@property
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = CLIPImageProcessor(
crop_size=224 , do_center_crop=A , do_normalize=A , do_resize=A , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = self.dummy_prior
_a = self.dummy_image_encoder
_a = self.dummy_text_encoder
_a = self.dummy_tokenizer
_a = self.dummy_image_processor
_a = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_000 , clip_sample=A , clip_sample_range=10.0 , )
_a = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''scheduler''': scheduler,
'''image_processor''': image_processor,
}
return components
def a__ (self , A , A=0 ) -> Union[str, Any]:
"""simple docstring"""
if str(A ).startswith('''mps''' ):
_a = torch.manual_seed(A )
else:
_a = torch.Generator(device=A ).manual_seed(A )
_a = {
'''prompt''': '''horse''',
'''generator''': generator,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def a__ (self ) -> int:
"""simple docstring"""
_a = '''cpu'''
_a = self.get_dummy_components()
_a = self.pipeline_class(**A )
_a = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_a = pipe(**self.get_dummy_inputs(A ) )
_a = output.image_embeds
_a = pipe(
**self.get_dummy_inputs(A ) , return_dict=A , )[0]
_a = image[0, -10:]
_a = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
_a = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = torch_device == '''cpu'''
_a = True
_a = False
self._test_inference_batch_single_identical(
test_max_difference=A , relax_max_difference=A , test_mean_pixel_difference=A , )
@skip_mps
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = torch_device == '''cpu'''
_a = False
self._test_attention_slicing_forward_pass(
test_max_difference=A , test_mean_pixel_difference=A , )
| 11 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
lowercase_ = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class __A :
'''simple docstring'''
def __init__(self , A , A=16 , A=13 , A=7 , A=14 , A=10 , A=19 , A=5 , A=4 , A=True , A=16 , A=2 , A=4 , A=4 , A="gelu" , A=0.1 , A=0.1 , A=[1, 2, 3, 4, 5] , A=25 , A=5 , ) -> List[str]:
"""simple docstring"""
_a = d_model
_a = parent
_a = batch_size
_a = prediction_length
_a = context_length
_a = cardinality
_a = num_time_features
_a = lags_sequence
_a = embedding_dimension
_a = is_training
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = context_length
_a = prediction_length + label_length
_a = label_length
_a = moving_average
_a = autocorrelation_factor
def a__ (self ) -> Any:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def a__ (self , A ) -> List[Any]:
"""simple docstring"""
_a = config.context_length + max(config.lags_sequence )
_a = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
_a = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_a = floats_tensor([self.batch_size, _past_length] )
_a = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_a = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_a = floats_tensor([self.batch_size, config.prediction_length] )
_a = {
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
def a__ (self ) -> Any:
"""simple docstring"""
_a = self.get_config()
_a = self.prepare_autoformer_inputs_dict(A )
return config, inputs_dict
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a , _a = self.prepare_config_and_inputs()
return config, inputs_dict
def a__ (self , A , A ) -> Union[str, Any]:
"""simple docstring"""
_a = AutoformerModel(config=A ).to(A ).eval()
_a = model(**A )
_a = outputs.encoder_last_hidden_state
_a = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_a = model.get_encoder()
encoder.save_pretrained(A )
_a = AutoformerEncoder.from_pretrained(A ).to(A )
_a , _a , _a , _a , _a = model.create_network_inputs(**A )
_a , _a = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
_a = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_a = encoder(inputs_embeds=A )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
_a = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
_a = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
_a = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_a = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_a = model.get_decoder()
decoder.save_pretrained(A )
_a = AutoformerDecoder.from_pretrained(A ).to(A )
_a = decoder(
trend=A , inputs_embeds=A , encoder_hidden_states=A , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class __A ( A , A , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Dict = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
__lowerCamelCase : Optional[Any] = (AutoformerForPrediction,) if is_torch_available() else ()
__lowerCamelCase : Tuple = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
__lowerCamelCase : Tuple = False
__lowerCamelCase : Dict = False
__lowerCamelCase : int = False
__lowerCamelCase : Union[str, Any] = False
__lowerCamelCase : Optional[int] = False
__lowerCamelCase : List[Any] = False
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = AutoformerModelTester(self )
_a = ConfigTester(self , config_class=A , has_text_modality=A )
def a__ (self ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ (self ) -> Dict:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_a = model_class(A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A )
_a , _a = model_class.from_pretrained(A , output_loading_info=A )
self.assertEqual(info['''missing_keys'''] , [] )
def a__ (self ) -> str:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*A )
@unittest.skip(reason='''Model has no tokens embeddings''' )
def a__ (self ) -> Tuple:
"""simple docstring"""
pass
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = inspect.signature(getattr(A , '''forward''' ) )
# The main input is the name of the argument after `self`
_a = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , A )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(A )
_a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = [
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''' )
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
] )
self.assertListEqual(arg_names[: len(A )] , A )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = True
_a = getattr(self.model_tester , '''seq_length''' , A )
_a = getattr(self.model_tester , '''decoder_seq_length''' , A )
_a = getattr(self.model_tester , '''encoder_seq_length''' , A )
_a = getattr(self.model_tester , '''d_model''' , A )
_a = getattr(self.model_tester , '''num_attention_heads''' , A )
_a = d_model // num_attention_heads
for model_class in self.all_model_classes:
_a = True
_a = False
_a = True
_a = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(A , A ) )
_a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_a = True
_a = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(A , A ) )
_a = outputs.encoder_attentions
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
_a = len(A )
_a = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(A , A )
# decoder attentions
_a = outputs.decoder_attentions
self.assertIsInstance(A , (list, tuple) )
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
_a = outputs.cross_attentions
self.assertIsInstance(A , (list, tuple) )
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
_a = True
_a = True
_a = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(A , A ) )
self.assertEqual(out_len + 2 , len(A ) )
_a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def lowerCAmelCase (__A="train-batch.pt"):
"""simple docstring"""
_a = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''' , filename=__A , repo_type='''dataset''')
_a = torch.load(__A , map_location=__A)
return batch
@require_torch
@slow
class __A ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A )
_a = prepare_batch()
with torch.no_grad():
_a = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
_a = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , A )
_a = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=A )
self.assertTrue(torch.allclose(output[0, :3, :3] , A , atol=A ) )
def a__ (self ) -> Any:
"""simple docstring"""
_a = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A )
_a = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
_a = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
_a = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , A )
_a = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=A )
self.assertTrue(torch.allclose(output[0, :3, :3] , A , atol=A ) )
def a__ (self ) -> Tuple:
"""simple docstring"""
_a = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A )
_a = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
_a = model.generate(
static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
_a = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , A )
_a = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=A )
_a = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , A , rtol=1E-1 ) )
| 11 | 1 |
import os
import sys
import unittest
A = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
A = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
A = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = get_test_to_tester_mapping(__UpperCamelCase )
snake_case_ = get_test_to_tester_mapping(__UpperCamelCase )
snake_case_ = {'BertModelTest': 'BertModelTester'}
snake_case_ = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) , __UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = get_model_to_test_mapping(__UpperCamelCase )
snake_case_ = get_model_to_test_mapping(__UpperCamelCase )
snake_case_ = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
snake_case_ = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) , __UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = get_model_to_tester_mapping(__UpperCamelCase )
snake_case_ = get_model_to_tester_mapping(__UpperCamelCase )
snake_case_ = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
snake_case_ = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(get_test_info.to_json(__UpperCamelCase ) , __UpperCamelCase )
| 46 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
A = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
__A = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__A = field(
default=__snake_case , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__A = field(
default=__snake_case , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__A = field(
default=__snake_case , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__A = field(
default=__snake_case , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
__A = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__A = field(
default=__snake_case , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
__A = field(default=__snake_case , metadata={"""help""": """The input training data file (a text file)."""} )
__A = field(
default=__snake_case , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
__A = field(
default=__snake_case , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
__A = field(
default=__snake_case , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__A = field(
default=__snake_case , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__A = field(
default=__snake_case , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
__A = field(
default=__snake_case , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__A = field(
default=__snake_case , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def __lowerCAmelCase ( self ):
"""simple docstring"""
if self.train_file is not None:
snake_case_ = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
snake_case_ = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
__A = 42
__A = True
__A = None
__A = None
def __call__( self , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = 'label' if 'label' in features[0].keys() else 'labels'
snake_case_ = [feature.pop(__UpperCamelCase ) for feature in features]
snake_case_ = len(__UpperCamelCase )
snake_case_ = len(features[0]['input_ids'] )
snake_case_ = [
[{k: v[i] for k, v in feature.items()} for i in range(__UpperCamelCase )] for feature in features
]
snake_case_ = list(chain(*__UpperCamelCase ) )
snake_case_ = self.tokenizer.pad(
__UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
# Un-flatten
snake_case_ = {k: v.view(__UpperCamelCase , __UpperCamelCase , -1 ) for k, v in batch.items()}
# Add back labels
snake_case_ = torch.tensor(__UpperCamelCase , dtype=torch.intaa )
return batch
def a():
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case_ , snake_case_ , snake_case_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case_ , snake_case_ , snake_case_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , lowercase__ , lowercase__ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
snake_case_ = training_args.get_process_log_level()
logger.setLevel(lowercase__ )
datasets.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
snake_case_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
snake_case_ = {}
if data_args.train_file is not None:
snake_case_ = data_args.train_file
if data_args.validation_file is not None:
snake_case_ = data_args.validation_file
snake_case_ = data_args.train_file.split('.' )[-1]
snake_case_ = load_dataset(
lowercase__ , data_files=lowercase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
snake_case_ = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
snake_case_ = [f"""ending{i}""" for i in range(4 )]
snake_case_ = 'sent1'
snake_case_ = 'sent2'
if data_args.max_seq_length is None:
snake_case_ = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
snake_case_ = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
snake_case_ = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowercase__ ):
snake_case_ = [[context] * 4 for context in examples[context_name]]
snake_case_ = examples[question_header_name]
snake_case_ = [
[f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(lowercase__ )
]
# Flatten out
snake_case_ = list(chain(*lowercase__ ) )
snake_case_ = list(chain(*lowercase__ ) )
# Tokenize
snake_case_ = tokenizer(
lowercase__ , lowercase__ , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowercase__ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
snake_case_ = raw_datasets['train']
if data_args.max_train_samples is not None:
snake_case_ = min(len(lowercase__ ) , data_args.max_train_samples )
snake_case_ = train_dataset.select(range(lowercase__ ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
snake_case_ = train_dataset.map(
lowercase__ , batched=lowercase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
snake_case_ = raw_datasets['validation']
if data_args.max_eval_samples is not None:
snake_case_ = min(len(lowercase__ ) , data_args.max_eval_samples )
snake_case_ = eval_dataset.select(range(lowercase__ ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
snake_case_ = eval_dataset.map(
lowercase__ , batched=lowercase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
snake_case_ = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowercase__ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowercase__ ):
snake_case_ , snake_case_ = eval_predictions
snake_case_ = np.argmax(lowercase__ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
snake_case_ = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , compute_metrics=lowercase__ , )
# Training
if training_args.do_train:
snake_case_ = None
if training_args.resume_from_checkpoint is not None:
snake_case_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case_ = last_checkpoint
snake_case_ = trainer.train(resume_from_checkpoint=lowercase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
snake_case_ = train_result.metrics
snake_case_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase__ )
)
snake_case_ = min(lowercase__ , len(lowercase__ ) )
trainer.log_metrics('train' , lowercase__ )
trainer.save_metrics('train' , lowercase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
snake_case_ = trainer.evaluate()
snake_case_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase__ )
snake_case_ = min(lowercase__ , len(lowercase__ ) )
trainer.log_metrics('eval' , lowercase__ )
trainer.save_metrics('eval' , lowercase__ )
snake_case_ = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase__ )
else:
trainer.create_model_card(**lowercase__ )
def a(lowercase__ ):
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 46 | 1 |
"""simple docstring"""
import math
class a_ :
def _snake_case ( self : List[Any] , __UpperCamelCase : list[list[float]] , __UpperCamelCase : list[int] ) ->int:
'''simple docstring'''
_UpperCAmelCase = 0.0
_UpperCAmelCase = 0.0
for i in range(len(__UpperCamelCase ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
da += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > da else 1
return 0
def _snake_case ( self : Dict , __UpperCamelCase : list[list[int | float]] , __UpperCamelCase : list[int] , __UpperCamelCase : int , __UpperCamelCase : float ) ->list[list[int | float]]:
'''simple docstring'''
for i in range(len(__UpperCamelCase ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def _UpperCamelCase ( ) -> None:
"""simple docstring"""
_UpperCAmelCase = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
_UpperCAmelCase = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
_UpperCAmelCase = SelfOrganizingMap()
_UpperCAmelCase = 3
_UpperCAmelCase = 0.5
for _ in range(_A ):
for j in range(len(_A ) ):
# training sample
_UpperCAmelCase = training_samples[j]
# Compute the winning vector
_UpperCAmelCase = self_organizing_map.get_winner(_A , _A )
# Update the winning vector
_UpperCAmelCase = self_organizing_map.update(_A , _A , _A , _A )
# classify test sample
_UpperCAmelCase = [0, 0, 0, 1]
_UpperCAmelCase = self_organizing_map.get_winner(_A , _A )
# results
print(F"""Clusters that the test sample belongs to : {winner}""" )
print(F"""Weights that have been trained : {weights}""" )
# running the main() function
if __name__ == "__main__":
main() | 555 |
"""simple docstring"""
import os
from math import logaa
def _UpperCamelCase ( _A = "base_exp.txt" ) -> int:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(_A ) , _A ) ) ):
_UpperCAmelCase ,_UpperCAmelCase = list(map(_A , line.split(""",""" ) ) )
if x * logaa(_A ) > largest:
_UpperCAmelCase = x * logaa(_A )
_UpperCAmelCase = i + 1
return result
if __name__ == "__main__":
print(solution()) | 555 | 1 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def snake_case__ ( _snake_case : Union[str, Any] , _snake_case : Any=False ):
"""simple docstring"""
try:
UpperCamelCase__ = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCamelCase__ = default
else:
# KEY is set, convert it to True or False.
try:
UpperCamelCase__ = strtobool(__UpperCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F'If set, {key} must be yes or no.' )
return _value
A : Optional[Any] = parse_flag_from_env('RUN_SLOW', default=False)
A : Tuple = parse_flag_from_env('RUN_REMOTE', default=False)
A : List[Any] = parse_flag_from_env('RUN_LOCAL', default=True)
A : Union[str, Any] = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
A : Dict = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
A : str = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
A : Optional[int] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
A : Optional[Any] = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
A : Any = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
A : str = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
A : Any = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def snake_case__ ( _snake_case : Tuple ):
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
UpperCamelCase__ = unittest.skip("test requires faiss" )(__UpperCamelCase )
return test_case
def snake_case__ ( _snake_case : int ):
"""simple docstring"""
try:
import regex # noqa
except ImportError:
UpperCamelCase__ = unittest.skip("test requires regex" )(__UpperCamelCase )
return test_case
def snake_case__ ( _snake_case : str ):
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
UpperCamelCase__ = unittest.skip("test requires elasticsearch" )(__UpperCamelCase )
return test_case
def snake_case__ ( _snake_case : int ):
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
UpperCamelCase__ = unittest.skip("test requires sqlalchemy" )(__UpperCamelCase )
return test_case
def snake_case__ ( _snake_case : int ):
"""simple docstring"""
if not config.TORCH_AVAILABLE:
UpperCamelCase__ = unittest.skip("test requires PyTorch" )(__UpperCamelCase )
return test_case
def snake_case__ ( _snake_case : str ):
"""simple docstring"""
if not config.TF_AVAILABLE:
UpperCamelCase__ = unittest.skip("test requires TensorFlow" )(__UpperCamelCase )
return test_case
def snake_case__ ( _snake_case : List[Any] ):
"""simple docstring"""
if not config.JAX_AVAILABLE:
UpperCamelCase__ = unittest.skip("test requires JAX" )(__UpperCamelCase )
return test_case
def snake_case__ ( _snake_case : List[str] ):
"""simple docstring"""
if not config.PIL_AVAILABLE:
UpperCamelCase__ = unittest.skip("test requires Pillow" )(__UpperCamelCase )
return test_case
def snake_case__ ( _snake_case : List[Any] ):
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("test requires transformers" )(__UpperCamelCase )
else:
return test_case
def snake_case__ ( _snake_case : Optional[Any] ):
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("test requires tiktoken" )(__UpperCamelCase )
else:
return test_case
def snake_case__ ( _snake_case : List[Any] ):
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("test requires spacy" )(__UpperCamelCase )
else:
return test_case
def snake_case__ ( _snake_case : Tuple ):
"""simple docstring"""
def _require_spacy_model(_snake_case : List[Any] ):
try:
import spacy # noqa F401
spacy.load(__UpperCamelCase )
except ImportError:
return unittest.skip("test requires spacy" )(__UpperCamelCase )
except OSError:
return unittest.skip("test requires spacy model \'{}\'".format(__UpperCamelCase ) )(__UpperCamelCase )
else:
return test_case
return _require_spacy_model
def snake_case__ ( _snake_case : int ):
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("test requires pyspark" )(__UpperCamelCase )
else:
return test_case
def snake_case__ ( _snake_case : int ):
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("test requires joblibspark" )(__UpperCamelCase )
else:
return test_case
def snake_case__ ( _snake_case : List[Any] ):
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
UpperCamelCase__ = unittest.skip("test is slow" )(__UpperCamelCase )
return test_case
def snake_case__ ( _snake_case : Union[str, Any] ):
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
UpperCamelCase__ = unittest.skip("test is local" )(__UpperCamelCase )
return test_case
def snake_case__ ( _snake_case : List[str] ):
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
UpperCamelCase__ = unittest.skip("test is packaged" )(__UpperCamelCase )
return test_case
def snake_case__ ( _snake_case : Optional[Any] ):
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
UpperCamelCase__ = unittest.skip("test requires remote" )(__UpperCamelCase )
return test_case
def snake_case__ ( *_snake_case : Any ):
"""simple docstring"""
def decorate(cls : Tuple ):
for name, fn in cls.__dict__.items():
if callable(__UpperCamelCase ) and name.startswith("test" ):
for decorator in decorators:
UpperCamelCase__ = decorator(__UpperCamelCase )
setattr(cls , __UpperCamelCase , __UpperCamelCase )
return cls
return decorate
class lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
pass
class lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
A = 0
A = 1
A = 2
@contextmanager
def snake_case__ ( _snake_case : int=OfflineSimulationMode.CONNECTION_FAILS , _snake_case : Optional[int]=1E-16 ):
"""simple docstring"""
UpperCamelCase__ = requests.Session().request
def timeout_request(_snake_case : List[Any] , _snake_case : List[str] , _snake_case : List[str] , **_snake_case : Dict ):
# Change the url to an invalid url so that the connection hangs
UpperCamelCase__ = '''https://10.255.255.1'''
if kwargs.get("timeout" ) is None:
raise RequestWouldHangIndefinitelyError(
F'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.' )
UpperCamelCase__ = timeout
try:
return online_request(__UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
UpperCamelCase__ = url
UpperCamelCase__ = e.args[0]
UpperCamelCase__ = (max_retry_error.args[0].replace("10.255.255.1" , F'OfflineMock[{url}]' ),)
UpperCamelCase__ = (max_retry_error,)
raise
def raise_connection_error(_snake_case : Tuple , _snake_case : List[Any] , **_snake_case : Any ):
raise requests.ConnectionError("Offline mode is enabled." , request=__UpperCamelCase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("requests.Session.send" , __UpperCamelCase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("requests.Session.request" , __UpperCamelCase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("datasets.config.HF_DATASETS_OFFLINE" , __UpperCamelCase ):
yield
else:
raise ValueError("Please use a value from the OfflineSimulationMode enum." )
@contextmanager
def snake_case__ ( *_snake_case : str , **_snake_case : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = str(Path().resolve() )
with tempfile.TemporaryDirectory(*__UpperCamelCase , **__UpperCamelCase ) as tmp_dir:
try:
os.chdir(__UpperCamelCase )
yield
finally:
os.chdir(__UpperCamelCase )
@contextmanager
def snake_case__ ( ):
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase__ = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def snake_case__ ( ):
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase__ = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def snake_case__ ( _snake_case : Optional[Any] , _snake_case : int ):
"""simple docstring"""
return deepcopy(__UpperCamelCase ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(__UpperCamelCase ).integers(0 , 1_00 , 10 ).tolist()
def snake_case__ ( _snake_case : Tuple ):
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(_snake_case : Tuple , *_snake_case : Tuple , **_snake_case : Optional[Any] ):
try:
return func(*__UpperCamelCase , **__UpperCamelCase )
except HTTPError as err:
if str(__UpperCamelCase ).startswith("500" ) or str(__UpperCamelCase ).startswith("502" ):
pytest.xfail(str(__UpperCamelCase ) )
raise err
return decorator.decorator(_wrapper , __UpperCamelCase )
class lowerCAmelCase :
'''simple docstring'''
def __init__( self :List[str] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Tuple ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ = returncode
UpperCamelCase__ = stdout
UpperCamelCase__ = stderr
async def snake_case__ ( _snake_case : Union[str, Any] , _snake_case : Optional[Any] ):
"""simple docstring"""
while True:
UpperCamelCase__ = await stream.readline()
if line:
callback(__UpperCamelCase )
else:
break
async def snake_case__ ( _snake_case : str , _snake_case : Union[str, Any]=None , _snake_case : Tuple=None , _snake_case : List[str]=None , _snake_case : Tuple=False , _snake_case : Optional[Any]=False ):
"""simple docstring"""
if echo:
print("\nRunning: " , " ".join(__UpperCamelCase ) )
UpperCamelCase__ = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__UpperCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCamelCase__ = []
UpperCamelCase__ = []
def tee(_snake_case : int , _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : Union[str, Any]="" ):
UpperCamelCase__ = line.decode("utf-8" ).rstrip()
sink.append(__UpperCamelCase )
if not quiet:
print(__UpperCamelCase , __UpperCamelCase , file=__UpperCamelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda _snake_case : tee(__UpperCamelCase , __UpperCamelCase , sys.stdout , label="stdout:" ) ),
_read_stream(p.stderr , lambda _snake_case : tee(__UpperCamelCase , __UpperCamelCase , sys.stderr , label="stderr:" ) ),
] , timeout=__UpperCamelCase , )
return _RunOutput(await p.wait() , __UpperCamelCase , __UpperCamelCase )
def snake_case__ ( _snake_case : Any , _snake_case : Optional[int]=None , _snake_case : int=None , _snake_case : Tuple=1_80 , _snake_case : str=False , _snake_case : int=True ):
"""simple docstring"""
UpperCamelCase__ = asyncio.get_event_loop()
UpperCamelCase__ = loop.run_until_complete(
_stream_subprocess(__UpperCamelCase , env=__UpperCamelCase , stdin=__UpperCamelCase , timeout=__UpperCamelCase , quiet=__UpperCamelCase , echo=__UpperCamelCase ) )
UpperCamelCase__ = ''' '''.join(__UpperCamelCase )
if result.returncode > 0:
UpperCamelCase__ = '''\n'''.join(result.stderr )
raise RuntimeError(
F'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
F'The combined stderr from workers follows:\n{stderr}' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F'\'{cmd_str}\' produced no output.' )
return result
def snake_case__ ( ):
"""simple docstring"""
UpperCamelCase__ = os.environ.get("PYTEST_XDIST_WORKER" , "gw0" )
UpperCamelCase__ = re.sub(R"^gw" , "" , __UpperCamelCase , 0 , re.M )
return int(__UpperCamelCase )
def snake_case__ ( ):
"""simple docstring"""
UpperCamelCase__ = 2_95_00
UpperCamelCase__ = pytest_xdist_worker_id()
return port + uniq_delta | 711 | """simple docstring"""
from collections.abc import Sequence
def snake_case__ ( _snake_case : Sequence[float] , _snake_case : bool = False ):
"""simple docstring"""
if not arr:
return 0
UpperCamelCase__ = 0 if allow_empty_subarrays else float("-inf" )
UpperCamelCase__ = 0.0
for num in arr:
UpperCamelCase__ = max(0 if allow_empty_subarrays else num , curr_sum + num )
UpperCamelCase__ = max(_snake_case , _snake_case )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
A : str = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"{max_subarray_sum(nums) = }") | 304 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class a_ ( UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase = """roberta"""
def __init__( self , A=5_0265 , A=768 , A=12 , A=12 , A=3072 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=2 , A=0.02 , A=1e-12 , A=1 , A=0 , A=2 , A="absolute" , A=True , A=None , **A , ) -> Optional[int]:
super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = position_embedding_type
_SCREAMING_SNAKE_CASE = use_cache
_SCREAMING_SNAKE_CASE = classifier_dropout
class a_ ( UpperCAmelCase_ ):
'''simple docstring'''
@property
def snake_case_( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_SCREAMING_SNAKE_CASE = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_SCREAMING_SNAKE_CASE = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 314 | from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def UpperCamelCase__ ( __lowercase) -> str:
raise NotImplementedError()
@abstractmethod
def UpperCamelCase__ ( self) -> Union[str, Any]:
raise NotImplementedError()
| 167 | 0 |
A_ : Optional[int] = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
A_ : Any = [{"type": "code", "content": INSTALL_CONTENT}]
A_ : int = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 720 |
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
A_ : Tuple = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
A_ : Tuple = concatenate_datasets
A_ : List[str] = DownloadConfig
A_ : int = DownloadManager
A_ : Optional[Any] = DownloadMode
A_ : Optional[int] = DownloadConfig
A_ : List[Any] = DownloadMode
A_ : Any = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 616 | 0 |
"""simple docstring"""
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( snake_case, snake_case):
__snake_case = 0
__snake_case = len(snake_case) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
__snake_case = i + 1
else:
__snake_case = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""") | 564 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class snake_case_ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : List[str] )->str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ ( self : Optional[int] )->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : Dict = 1
__lowerCAmelCase : Optional[int] = 3
__lowerCAmelCase : int = (32, 32)
__lowerCAmelCase : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_snake_case )
return image
@property
def UpperCAmelCase__ ( self : str )->Tuple:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def UpperCAmelCase__ ( self : Any )->Dict:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def UpperCAmelCase__ ( self : Optional[int] )->Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase : List[str] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(_snake_case )
@property
def UpperCAmelCase__ ( self : int )->List[str]:
'''simple docstring'''
def extract(*_snake_case : List[str] , **_snake_case : List[Any] ):
class snake_case_ :
def __init__( self : Tuple )->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : str = torch.ones([0] )
def UpperCAmelCase__ ( self : List[str] , _snake_case : List[str] )->Tuple:
'''simple docstring'''
self.pixel_values.to(_snake_case )
return self
return Out()
return extract
def UpperCAmelCase__ ( self : Tuple )->List[Any]:
'''simple docstring'''
__lowerCAmelCase : List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase : Optional[Any] = self.dummy_cond_unet
__lowerCAmelCase : Union[str, Any] = PNDMScheduler(skip_prk_steps=_snake_case )
__lowerCAmelCase : Optional[int] = self.dummy_vae
__lowerCAmelCase : List[Any] = self.dummy_text_encoder
__lowerCAmelCase : int = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
__lowerCAmelCase : Optional[int] = 77
__lowerCAmelCase : Dict = self.dummy_image.to(_snake_case )
__lowerCAmelCase : Union[str, Any] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
__lowerCAmelCase : Dict = AltDiffusionImgaImgPipeline(
unet=_snake_case , scheduler=_snake_case , vae=_snake_case , text_encoder=_snake_case , tokenizer=_snake_case , safety_checker=_snake_case , feature_extractor=self.dummy_extractor , )
__lowerCAmelCase : Optional[int] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_snake_case )
__lowerCAmelCase : Optional[int] = alt_pipe.to(_snake_case )
alt_pipe.set_progress_bar_config(disable=_snake_case )
__lowerCAmelCase : Union[str, Any] = """A painting of a squirrel eating a burger"""
__lowerCAmelCase : Optional[Any] = torch.Generator(device=_snake_case ).manual_seed(0 )
__lowerCAmelCase : List[str] = alt_pipe(
[prompt] , generator=_snake_case , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=_snake_case , )
__lowerCAmelCase : Dict = output.images
__lowerCAmelCase : Optional[Any] = torch.Generator(device=_snake_case ).manual_seed(0 )
__lowerCAmelCase : Tuple = alt_pipe(
[prompt] , generator=_snake_case , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=_snake_case , return_dict=_snake_case , )[0]
__lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
__lowerCAmelCase : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase : List[str] = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCAmelCase__ ( self : str )->Dict:
'''simple docstring'''
__lowerCAmelCase : Dict = self.dummy_cond_unet
__lowerCAmelCase : Optional[Any] = PNDMScheduler(skip_prk_steps=_snake_case )
__lowerCAmelCase : Dict = self.dummy_vae
__lowerCAmelCase : Dict = self.dummy_text_encoder
__lowerCAmelCase : Optional[int] = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
__lowerCAmelCase : List[Any] = 77
__lowerCAmelCase : List[str] = self.dummy_image.to(_snake_case )
# put models in fp16
__lowerCAmelCase : List[str] = unet.half()
__lowerCAmelCase : Optional[int] = vae.half()
__lowerCAmelCase : List[Any] = bert.half()
# make sure here that pndm scheduler skips prk
__lowerCAmelCase : Optional[int] = AltDiffusionImgaImgPipeline(
unet=_snake_case , scheduler=_snake_case , vae=_snake_case , text_encoder=_snake_case , tokenizer=_snake_case , safety_checker=_snake_case , feature_extractor=self.dummy_extractor , )
__lowerCAmelCase : Dict = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_snake_case )
__lowerCAmelCase : str = alt_pipe.to(_snake_case )
alt_pipe.set_progress_bar_config(disable=_snake_case )
__lowerCAmelCase : List[str] = """A painting of a squirrel eating a burger"""
__lowerCAmelCase : List[Any] = torch.manual_seed(0 )
__lowerCAmelCase : Optional[Any] = alt_pipe(
[prompt] , generator=_snake_case , num_inference_steps=2 , output_type="""np""" , image=_snake_case , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCAmelCase__ ( self : Optional[Any] )->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
__lowerCAmelCase : List[Any] = init_image.resize((760, 504) )
__lowerCAmelCase : Tuple = """BAAI/AltDiffusion"""
__lowerCAmelCase : Tuple = AltDiffusionImgaImgPipeline.from_pretrained(
_snake_case , safety_checker=_snake_case , )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
__lowerCAmelCase : Union[str, Any] = """A fantasy landscape, trending on artstation"""
__lowerCAmelCase : int = torch.manual_seed(0 )
__lowerCAmelCase : Union[str, Any] = pipe(
prompt=_snake_case , image=_snake_case , strength=0.75 , guidance_scale=7.5 , generator=_snake_case , output_type="""np""" , )
__lowerCAmelCase : Optional[int] = output.images[0]
__lowerCAmelCase : Union[str, Any] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
__lowerCAmelCase : Union[str, Any] = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Union[str, Any] )->int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : Tuple )->int:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__lowerCAmelCase : str = init_image.resize((768, 512) )
__lowerCAmelCase : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
__lowerCAmelCase : Optional[Any] = """BAAI/AltDiffusion"""
__lowerCAmelCase : str = AltDiffusionImgaImgPipeline.from_pretrained(
_snake_case , safety_checker=_snake_case , )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
__lowerCAmelCase : Optional[int] = """A fantasy landscape, trending on artstation"""
__lowerCAmelCase : List[Any] = torch.manual_seed(0 )
__lowerCAmelCase : Any = pipe(
prompt=_snake_case , image=_snake_case , strength=0.75 , guidance_scale=7.5 , generator=_snake_case , output_type="""np""" , )
__lowerCAmelCase : int = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2 | 504 | 0 |
import argparse
import os
import re
import packaging.version
lowerCAmelCase__ = "examples/"
lowerCAmelCase__ = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
lowerCAmelCase__ = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
lowerCAmelCase__ = "README.md"
def _lowerCAmelCase( __A , __A , __A ):
with open(UpperCamelCase__ , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase , UpperCAmelCase = REPLACE_PATTERNS[pattern]
UpperCAmelCase = replace.replace("VERSION" , UpperCamelCase__ )
UpperCAmelCase = re_pattern.sub(UpperCamelCase__ , UpperCamelCase__ )
with open(UpperCamelCase__ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(UpperCamelCase__ )
def _lowerCAmelCase( __A ):
for folder, directories, fnames in os.walk(UpperCamelCase__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ , pattern="examples" )
def _lowerCAmelCase( __A , __A=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not patch:
update_version_in_examples(UpperCamelCase__ )
def _lowerCAmelCase( ):
UpperCAmelCase = "🤗 Transformers currently provides the following architectures"
UpperCAmelCase = "1. Want to contribute a new model?"
with open(UpperCamelCase__ , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase = f.readlines()
# Find the start of the list.
UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
UpperCAmelCase = lines[index].replace(
"https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , )
index += 1
with open(UpperCamelCase__ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(UpperCamelCase__ )
def _lowerCAmelCase( ):
with open(REPLACE_FILES["init"] , "r" ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase = REPLACE_PATTERNS["init"][0].search(UpperCamelCase__ ).groups()[0]
return packaging.version.parse(UpperCamelCase__ )
def _lowerCAmelCase( __A=False ):
UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can\'t create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
UpperCAmelCase = default_version.base_version
elif patch:
UpperCAmelCase = F"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
UpperCAmelCase = F"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
UpperCAmelCase = input(F"Which version are you releasing? [{default_version}]" )
if len(UpperCamelCase__ ) == 0:
UpperCAmelCase = default_version
print(F"Updating version to {version}." )
global_version_update(UpperCamelCase__ , patch=UpperCamelCase__ )
if not patch:
print("Cleaning main README, don\'t forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
def _lowerCAmelCase( ):
UpperCAmelCase = get_version()
UpperCAmelCase = F"{current_version.major}.{current_version.minor + 1}.0.dev0"
UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
UpperCAmelCase = input(F"Which version are we developing now? [{dev_version}]" )
if len(UpperCamelCase__ ) == 0:
UpperCAmelCase = dev_version
print(F"Updating version to {version}." )
global_version_update(UpperCamelCase__ )
print("Cleaning main README, don\'t forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
lowerCAmelCase__ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 702 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCAmelCase__ = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
lowerCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowerCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowerCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
def _UpperCamelCase ( self : int ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
def _UpperCamelCase ( self : Dict , lowerCAmelCase__ : List[Any] ) -> Dict:
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any=0.9 , lowerCAmelCase__ : Tuple=3 , lowerCAmelCase__ : Optional[int]=0.5 ) -> Any:
if NLTK_VERSION >= version.Version("3.6.5" ):
UpperCAmelCase = [
meteor_score.single_meteor_score(
word_tokenize(lowerCAmelCase__ ) , word_tokenize(lowerCAmelCase__ ) , alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , gamma=lowerCAmelCase__ )
for ref, pred in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
else:
UpperCAmelCase = [
meteor_score.single_meteor_score(lowerCAmelCase__ , lowerCAmelCase__ , alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , gamma=lowerCAmelCase__ )
for ref, pred in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
return {"meteor": np.mean(lowerCAmelCase__ )}
| 1 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_snake_case : Any = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : str = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : str = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Union[str, Any] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Any = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
_snake_case : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 22 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class snake_case_ ( a_ ):
__lowerCAmelCase = ["pixel_values"]
def __init__( self , a_ = True , a_ = None , a_ = PILImageResampling.BILINEAR , a_ = True , a_ = None , a_ = True , a_ = 1 / 2_5_5 , a_ = True , a_ = None , a_ = None , **a_ , ):
super().__init__(**a_ )
a_ : Any = size if size is not None else {"shortest_edge": 2_5_6}
a_ : Any = get_size_dict(a_ , default_to_square=a_ )
a_ : int = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
a_ : Optional[int] = get_size_dict(a_ , param_name="crop_size" )
a_ : List[Any] = do_resize
a_ : List[str] = size
a_ : Dict = resample
a_ : int = do_center_crop
a_ : List[Any] = crop_size
a_ : Union[str, Any] = do_rescale
a_ : str = rescale_factor
a_ : Any = do_normalize
a_ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a_ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case_ ( self , a_ , a_ , a_ = PILImageResampling.BICUBIC , a_ = None , **a_ , ):
a_ : Tuple = get_size_dict(a_ , default_to_square=a_ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
a_ : Optional[int] = get_resize_output_image_size(a_ , size=size["shortest_edge"] , default_to_square=a_ )
return resize(a_ , size=a_ , resample=a_ , data_format=a_ , **a_ )
def snake_case_ ( self , a_ , a_ , a_ = None , **a_ , ):
a_ : Any = get_size_dict(a_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(a_ , size=(size["height"], size["width"]) , data_format=a_ , **a_ )
def snake_case_ ( self , a_ , a_ , a_ = None , **a_ ):
return rescale(a_ , scale=a_ , data_format=a_ , **a_ )
def snake_case_ ( self , a_ , a_ , a_ , a_ = None , **a_ , ):
return normalize(a_ , mean=a_ , std=a_ , data_format=a_ , **a_ )
def snake_case_ ( self , a_ , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = ChannelDimension.FIRST , **a_ , ):
a_ : int = do_resize if do_resize is not None else self.do_resize
a_ : Any = size if size is not None else self.size
a_ : Dict = get_size_dict(a_ , default_to_square=a_ )
a_ : int = resample if resample is not None else self.resample
a_ : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
a_ : Any = crop_size if crop_size is not None else self.crop_size
a_ : Dict = get_size_dict(a_ , param_name="crop_size" )
a_ : str = do_rescale if do_rescale is not None else self.do_rescale
a_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
a_ : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
a_ : int = image_mean if image_mean is not None else self.image_mean
a_ : Dict = image_std if image_std is not None else self.image_std
a_ : int = make_list_of_images(a_ )
if not valid_images(a_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
a_ : Optional[int] = [to_numpy_array(a_ ) for image in images]
if do_resize:
a_ : Union[str, Any] = [self.resize(image=a_ , size=a_ , resample=a_ ) for image in images]
if do_center_crop:
a_ : Union[str, Any] = [self.center_crop(image=a_ , size=a_ ) for image in images]
if do_rescale:
a_ : int = [self.rescale(image=a_ , scale=a_ ) for image in images]
if do_normalize:
a_ : List[str] = [self.normalize(image=a_ , mean=a_ , std=a_ ) for image in images]
a_ : Tuple = [to_channel_dimension_format(a_ , a_ ) for image in images]
a_ : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=a_ , tensor_type=a_ )
def snake_case_ ( self , a_ , a_ = None ):
a_ : Tuple = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(a_ ) != len(a_ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(a_ ):
a_ : str = target_sizes.numpy()
a_ : int = []
for idx in range(len(a_ ) ):
a_ : List[str] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=a_ )
a_ : List[Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(a_ )
else:
a_ : List[str] = logits.argmax(dim=1 )
a_ : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 237 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
SCREAMING_SNAKE_CASE = {
"gpt-neox-20b": 2048,
}
class lowerCamelCase ( lowercase__ ):
lowerCAmelCase_ : int = VOCAB_FILES_NAMES
lowerCAmelCase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : List[str] = ['input_ids', 'attention_mask']
def __init__( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase="<|endoftext|>" , lowerCAmelCase="<|endoftext|>" , lowerCAmelCase="<|endoftext|>" , lowerCAmelCase=False , **lowerCAmelCase , ):
super().__init__(
lowerCAmelCase , lowerCAmelCase , tokenizer_file=lowerCAmelCase , unk_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCAmelCase ) != add_prefix_space:
UpperCAmelCase_ = getattr(lowerCAmelCase , pre_tok_state.pop("type" ) )
UpperCAmelCase_ = add_prefix_space
UpperCAmelCase_ = pre_tok_class(**lowerCAmelCase )
UpperCAmelCase_ = add_prefix_space
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase )
return tuple(lowerCAmelCase )
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) + [self.eos_token_id] )
if len(lowerCAmelCase ) > self.model_max_length:
UpperCAmelCase_ = input_ids[-self.model_max_length :]
return input_ids
| 712 |
import math
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> list[int]:
UpperCAmelCase_ = []
UpperCAmelCase_ = 2
UpperCAmelCase_ = int(math.sqrt(__SCREAMING_SNAKE_CASE ) ) # Size of every segment
UpperCAmelCase_ = [True] * (end + 1)
UpperCAmelCase_ = []
while start <= end:
if temp[start] is True:
in_prime.append(__SCREAMING_SNAKE_CASE )
for i in range(start * start , end + 1 , __SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = False
start += 1
prime += in_prime
UpperCAmelCase_ = end + 1
UpperCAmelCase_ = min(2 * end , __SCREAMING_SNAKE_CASE )
while low <= n:
UpperCAmelCase_ = [True] * (high - low + 1)
for each in in_prime:
UpperCAmelCase_ = math.floor(low / each ) * each
if t < low:
t += each
for j in range(__SCREAMING_SNAKE_CASE , high + 1 , __SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = False
for j in range(len(__SCREAMING_SNAKE_CASE ) ):
if temp[j] is True:
prime.append(j + low )
UpperCAmelCase_ = high + 1
UpperCAmelCase_ = min(high + end , __SCREAMING_SNAKE_CASE )
return prime
print(sieve(10**6))
| 23 | 0 |
class __UpperCamelCase : # Public class to implement a graph
def __init__( self : str , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : list[list[bool]] ) -> None:
"""simple docstring"""
__lowercase = row
__lowercase = col
__lowercase = graph
def _a ( self : str , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : list[list[bool]] ) -> bool:
"""simple docstring"""
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def _a ( self : str , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : list[list[bool]] ) -> None:
"""simple docstring"""
__lowercase = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
__lowercase = [-1, 0, 1, -1, 1, -1, 0, 1]
__lowercase = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , _lowerCAmelCase ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , _lowerCAmelCase )
def _a ( self : Dict ) -> int: # And finally, count all islands.
"""simple docstring"""
__lowercase = [[False for j in range(self.COL )] for i in range(self.ROW )]
__lowercase = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
count += 1
return count
| 80 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : List[Any] = F'''{sampling_rate}'''
lowerCamelCase : Optional[int] = """1"""
lowerCamelCase : Any = """f32le"""
lowerCamelCase : Any = [
"""ffmpeg""",
"""-i""",
"""pipe:0""",
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
try:
with subprocess.Popen(lowerCamelCase, stdin=subprocess.PIPE, stdout=subprocess.PIPE ) as ffmpeg_process:
lowerCamelCase : Optional[int] = ffmpeg_process.communicate(lowerCamelCase )
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error
lowerCamelCase : Union[str, Any] = output_stream[0]
lowerCamelCase : Optional[Any] = np.frombuffer(lowerCamelCase, np.floataa )
if audio.shape[0] == 0:
raise ValueError("""Malformed soundfile""" )
return audio
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = "f32le", ):
lowerCamelCase : Dict = F'''{sampling_rate}'''
lowerCamelCase : List[Any] = """1"""
if format_for_conversion == "s16le":
lowerCamelCase : Any = 2
elif format_for_conversion == "f32le":
lowerCamelCase : Dict = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
lowerCamelCase : Dict = platform.system()
if system == "Linux":
lowerCamelCase : Union[str, Any] = """alsa"""
lowerCamelCase : List[Any] = """default"""
elif system == "Darwin":
lowerCamelCase : List[Any] = """avfoundation"""
lowerCamelCase : List[Any] = """:0"""
elif system == "Windows":
lowerCamelCase : int = """dshow"""
lowerCamelCase : Any = """default"""
lowerCamelCase : Any = [
"""ffmpeg""",
"""-f""",
format_,
"""-i""",
input_,
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-fflags""",
"""nobuffer""",
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
lowerCamelCase : List[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowerCamelCase : Any = _ffmpeg_stream(lowerCamelCase, lowerCamelCase )
for item in iterator:
yield item
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = "f32le", ):
if stream_chunk_s is not None:
lowerCamelCase : int = stream_chunk_s
else:
lowerCamelCase : Dict = chunk_length_s
lowerCamelCase : Optional[Any] = ffmpeg_microphone(lowerCamelCase, lowerCamelCase, format_for_conversion=lowerCamelCase )
if format_for_conversion == "s16le":
lowerCamelCase : Optional[int] = np.intaa
lowerCamelCase : Optional[Any] = 2
elif format_for_conversion == "f32le":
lowerCamelCase : int = np.floataa
lowerCamelCase : Any = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
lowerCamelCase : Any = chunk_length_s / 6
lowerCamelCase : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(lowerCamelCase, (int, float) ):
lowerCamelCase : Optional[int] = [stride_length_s, stride_length_s]
lowerCamelCase : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
lowerCamelCase : Optional[int] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
lowerCamelCase : List[Any] = datetime.datetime.now()
lowerCamelCase : List[Any] = datetime.timedelta(seconds=lowerCamelCase )
for item in chunk_bytes_iter(lowerCamelCase, lowerCamelCase, stride=(stride_left, stride_right), stream=lowerCamelCase ):
# Put everything back in numpy scale
lowerCamelCase : Dict = np.frombuffer(item["""raw"""], dtype=lowerCamelCase )
lowerCamelCase : List[Any] = (
item["""stride"""][0] // size_of_sample,
item["""stride"""][1] // size_of_sample,
)
lowerCamelCase : Tuple = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = False ):
lowerCamelCase : Optional[int] = B""""""
lowerCamelCase , lowerCamelCase : str = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
lowerCamelCase : str = 0
for raw in iterator:
acc += raw
if stream and len(lowerCamelCase ) < chunk_len:
lowerCamelCase : Optional[int] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(lowerCamelCase ) >= chunk_len:
# We are flushing the accumulator
lowerCamelCase : str = (_stride_left, stride_right)
lowerCamelCase : Dict = {"""raw""": acc[:chunk_len], """stride""": stride}
if stream:
lowerCamelCase : Optional[int] = False
yield item
lowerCamelCase : str = stride_left
lowerCamelCase : Tuple = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(lowerCamelCase ) > stride_left:
lowerCamelCase : List[str] = {"""raw""": acc, """stride""": (_stride_left, 0)}
if stream:
lowerCamelCase : List[Any] = False
yield item
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Optional[int] = 2**24 # 16Mo
try:
with subprocess.Popen(lowerCamelCase, stdout=subprocess.PIPE, bufsize=lowerCamelCase ) as ffmpeg_process:
while True:
lowerCamelCase : Any = ffmpeg_process.stdout.read(lowerCamelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
| 681 | 0 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""",
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Any = '''t5'''
__UpperCAmelCase : Any = ['''past_key_values''']
__UpperCAmelCase : Tuple = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self : Union[str, Any] ,_a : List[Any]=3_2128 ,_a : int=512 ,_a : Union[str, Any]=64 ,_a : Dict=2048 ,_a : List[Any]=6 ,_a : int=None ,_a : Union[str, Any]=8 ,_a : Optional[int]=32 ,_a : Tuple=128 ,_a : Optional[Any]=0.1 ,_a : List[Any]=1E-6 ,_a : List[str]=1.0 ,_a : Any="relu" ,_a : Any=True ,_a : str=True ,_a : Optional[int]=0 ,_a : List[Any]=1 ,**_a : str ,):
'''simple docstring'''
_a : int = vocab_size
_a : Optional[int] = d_model
_a : Optional[Any] = d_kv
_a : List[Any] = d_ff
_a : Dict = num_layers
_a : List[Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
_a : int = num_heads
_a : Optional[Any] = relative_attention_num_buckets
_a : Tuple = relative_attention_max_distance
_a : str = dropout_rate
_a : Union[str, Any] = layer_norm_epsilon
_a : Union[str, Any] = initializer_factor
_a : str = feed_forward_proj
_a : List[Any] = use_cache
_a : Optional[Any] = self.feed_forward_proj.split('-' )
_a : Any = act_info[-1]
_a : Dict = act_info[0] == 'gated'
if len(_a ) > 1 and act_info[0] != "gated" or len(_a ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
_a : List[str] = 'gelu_new'
super().__init__(
pad_token_id=_a ,eos_token_id=_a ,is_encoder_decoder=_a ,**_a ,)
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
@property
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : List[Any] = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
_a : Optional[int] = 'past_encoder_sequence + sequence'
_a : Optional[int] = {0: 'batch'}
_a : Any = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_a : List[Any] = {0: 'batch', 1: 'decoder_sequence'}
_a : Any = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_a ,direction='inputs' )
return common_inputs
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return 13
| 319 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
"""configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 319 | 1 |
'''simple docstring'''
import re
def lowercase_ ( __A : str ) -> bool:
"""simple docstring"""
lowercase : str =re.compile(
R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' )
return bool(re.search(__A , __A ) )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = '0094702343221'
print(is_sri_lankan_phone_number(phone))
| 94 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def lowercase_ ( __A : Tuple ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(__A , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__A , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__A ):
return [[videos]]
raise ValueError(F'Could not make batched video from {videos}' )
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ['''pixel_values''']
def __init__( self : List[Any] , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[int, float] = 1 / 255 , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , **UpperCAmelCase : Tuple , ) -> None:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase : Tuple =size if size is not None else {'''shortest_edge''': 224}
lowercase : str =get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
lowercase : List[str] =crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase : List[Any] =get_size_dict(UpperCAmelCase , param_name='''crop_size''' )
lowercase : Tuple =do_resize
lowercase : Any =size
lowercase : Optional[Any] =do_center_crop
lowercase : str =crop_size
lowercase : Any =resample
lowercase : List[Any] =do_rescale
lowercase : Dict =rescale_factor
lowercase : List[str] =do_normalize
lowercase : Optional[int] =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase : List[Any] =image_std if image_std is not None else IMAGENET_STANDARD_STD
def A__ ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : List[str] , ) -> np.ndarray:
'''simple docstring'''
lowercase : str =get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
if "shortest_edge" in size:
lowercase : int =get_resize_output_image_size(UpperCAmelCase , size['''shortest_edge'''] , default_to_square=UpperCAmelCase )
elif "height" in size and "width" in size:
lowercase : str =(size['''height'''], size['''width'''])
else:
raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Any , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
lowercase : List[str] =get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : int , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Tuple , ) -> List[Any]:
'''simple docstring'''
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Any , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : int , ) -> np.ndarray:
'''simple docstring'''
return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : int , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
'''simple docstring'''
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase : Dict =to_numpy_array(UpperCAmelCase )
if do_resize:
lowercase : Union[str, Any] =self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase )
if do_center_crop:
lowercase : Optional[Any] =self.center_crop(UpperCAmelCase , size=UpperCAmelCase )
if do_rescale:
lowercase : Dict =self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase )
if do_normalize:
lowercase : int =self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase )
lowercase : Optional[Any] =to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase )
return image
def A__ ( self : List[str] , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : Optional[int] , ) -> PIL.Image.Image:
'''simple docstring'''
lowercase : Any =do_resize if do_resize is not None else self.do_resize
lowercase : Union[str, Any] =resample if resample is not None else self.resample
lowercase : Tuple =do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase : List[Any] =do_rescale if do_rescale is not None else self.do_rescale
lowercase : str =rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : List[str] =do_normalize if do_normalize is not None else self.do_normalize
lowercase : List[str] =image_mean if image_mean is not None else self.image_mean
lowercase : Optional[int] =image_std if image_std is not None else self.image_std
lowercase : Optional[Any] =size if size is not None else self.size
lowercase : Any =get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
lowercase : Union[str, Any] =crop_size if crop_size is not None else self.crop_size
lowercase : Optional[int] =get_size_dict(UpperCAmelCase , param_name='''crop_size''' )
if not valid_images(UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
lowercase : List[str] =make_batched(UpperCAmelCase )
lowercase : Union[str, Any] =[
[
self._preprocess_image(
image=UpperCAmelCase , do_resize=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , do_center_crop=UpperCAmelCase , crop_size=UpperCAmelCase , do_rescale=UpperCAmelCase , rescale_factor=UpperCAmelCase , do_normalize=UpperCAmelCase , image_mean=UpperCAmelCase , image_std=UpperCAmelCase , data_format=UpperCAmelCase , )
for img in video
]
for video in videos
]
lowercase : Dict ={'''pixel_values''': videos}
return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
| 94 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a ={}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
a =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 132 | """simple docstring"""
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a =get_tests_dir('fixtures/test_sentencepiece.model')
if is_sentencepiece_available():
import sentencepiece as sp
a =5
a =10
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( __lowerCAmelCase , unittest.TestCase ):
A__ : Dict = SpeechaTextTokenizer
A__ : Any = False
A__ : Any = True
def _a ( self ):
super().setUp()
lowerCamelCase__ =sp.SentencePieceProcessor()
spm_model.Load(_lowerCamelCase )
lowerCamelCase__ =["<s>", "<pad>", "</s>", "<unk>"]
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(_lowerCamelCase ) )]
lowerCamelCase__ =dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
lowerCamelCase__ =Path(self.tmpdirname )
save_json(_lowerCamelCase , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_lowerCamelCase , save_dir / VOCAB_FILES_NAMES["spm_file"] )
lowerCamelCase__ =SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _a ( self ):
lowerCamelCase__ ="<pad>"
lowerCamelCase__ =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase )
def _a ( self ):
lowerCamelCase__ =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(_lowerCamelCase ) , 1001 )
def _a ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1001 )
def _a ( self ):
lowerCamelCase__ =SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
lowerCamelCase__ =tokenizer.tokenize("This is a test" )
self.assertListEqual(_lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [289, 50, 14, 174, 386] , )
lowerCamelCase__ =tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_lowerCamelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
lowerCamelCase__ =tokenizer.convert_tokens_to_ids(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
lowerCamelCase__ =tokenizer.convert_ids_to_tokens(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def _a ( self ):
# fmt: off
lowerCamelCase__ ={"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCamelCase , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , )
@require_sentencepiece
class __UpperCAmelCase ( unittest.TestCase ):
A__ : Optional[int] = '''valhalla/s2t_mustc_multilinguial_medium'''
A__ : Optional[int] = '''C\'est trop cool'''
A__ : Optional[Any] = '''Esto es genial'''
@classmethod
def _a ( cls ):
lowerCamelCase__ =SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def _a ( self ):
self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 )
def _a ( self ):
self.assertEqual(self.tokenizer.vocab_size , 10000 )
def _a ( self ):
self.assertIn(_lowerCamelCase , self.tokenizer.all_special_ids )
lowerCamelCase__ =[ES_CODE, 4, 1601, 47, 7647, 2]
lowerCamelCase__ =self.tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
lowerCamelCase__ =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertNotIn(self.tokenizer.eos_token , _lowerCamelCase )
def _a ( self ):
lowerCamelCase__ ="fr"
lowerCamelCase__ =self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , _lowerCamelCase )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def _a ( self ):
lowerCamelCase__ ="fr"
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
lowerCamelCase__ ="es"
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 132 | 1 |
from collections import deque
from .hash_table import HashTable
class lowerCamelCase_ ( _lowercase ):
def __init__( self : List[Any] , *__A : str , **__A : Union[str, Any] ):
super().__init__(*__A , **__A )
def lowerCAmelCase_ ( self : str , __A : str , __A : Optional[Any] ):
__A : Tuple = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(__A )
__A : Optional[int] = self.values[key]
def lowerCAmelCase_ ( self : Any ):
return (
sum(self.charge_factor - len(__A ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def lowerCAmelCase_ ( self : Optional[int] , __A : int , __A : Union[str, Any]=None ):
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(__A ) == 0
):
return key
return super()._collision_resolution(__A , __A )
| 17 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase_ ( _lowercase ):
_lowercase : List[str] = ['''pixel_values''']
def __init__( self : Dict , __A : bool = True , __A : Optional[Dict[str, int]] = None , __A : PILImageResampling = PILImageResampling.BILINEAR , __A : bool = True , __A : Dict[str, int] = None , __A : bool = True , __A : Union[int, float] = 1 / 255 , __A : bool = True , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , **__A : int , ):
super().__init__(**__A )
__A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 256}
__A : Dict = get_size_dict(__A , default_to_square=__A )
__A : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__A : int = get_size_dict(__A , param_name="""crop_size""" )
__A : str = do_resize
__A : Dict = size
__A : Any = resample
__A : Optional[Any] = do_center_crop
__A : List[str] = crop_size
__A : Optional[int] = do_rescale
__A : int = rescale_factor
__A : Union[str, Any] = do_normalize
__A : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__A : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase_ ( self : Optional[Any] , __A : np.ndarray , __A : Dict[str, int] , __A : PILImageResampling = PILImageResampling.BICUBIC , __A : Optional[Union[str, ChannelDimension]] = None , **__A : List[Any] , ):
__A : str = get_size_dict(__A , default_to_square=__A )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__A : Dict = get_resize_output_image_size(__A , size=size["""shortest_edge"""] , default_to_square=__A )
return resize(__A , size=__A , resample=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : Tuple , __A : np.ndarray , __A : Dict[str, int] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : str , ):
__A : str = get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(__A , size=(size["""height"""], size["""width"""]) , data_format=__A , **__A )
def lowerCAmelCase_ ( self : List[str] , __A : np.ndarray , __A : float , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Optional[int] ):
return rescale(__A , scale=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : Any , __A : np.ndarray , __A : Union[float, List[float]] , __A : Union[float, List[float]] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Tuple , ):
return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : int , __A : ImageInput , __A : Optional[bool] = None , __A : Dict[str, int] = None , __A : PILImageResampling = None , __A : bool = None , __A : Dict[str, int] = None , __A : Optional[bool] = None , __A : Optional[float] = None , __A : Optional[bool] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[str, TensorType]] = None , __A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__A : Optional[int] , ):
__A : List[str] = do_resize if do_resize is not None else self.do_resize
__A : Any = size if size is not None else self.size
__A : Union[str, Any] = get_size_dict(__A , default_to_square=__A )
__A : Tuple = resample if resample is not None else self.resample
__A : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
__A : List[Any] = crop_size if crop_size is not None else self.crop_size
__A : int = get_size_dict(__A , param_name="""crop_size""" )
__A : Tuple = do_rescale if do_rescale is not None else self.do_rescale
__A : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__A : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
__A : Optional[int] = image_mean if image_mean is not None else self.image_mean
__A : List[str] = image_std if image_std is not None else self.image_std
__A : Union[str, Any] = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
__A : Union[str, Any] = [to_numpy_array(__A ) for image in images]
if do_resize:
__A : int = [self.resize(image=__A , size=__A , resample=__A ) for image in images]
if do_center_crop:
__A : Optional[Any] = [self.center_crop(image=__A , size=__A ) for image in images]
if do_rescale:
__A : List[Any] = [self.rescale(image=__A , scale=__A ) for image in images]
if do_normalize:
__A : Any = [self.normalize(image=__A , mean=__A , std=__A ) for image in images]
__A : int = [to_channel_dimension_format(__A , __A ) for image in images]
__A : Tuple = {"""pixel_values""": images}
return BatchFeature(data=__A , tensor_type=__A )
def lowerCAmelCase_ ( self : int , __A : List[str] , __A : List[Tuple] = None ):
__A : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__A ) != len(__A ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(__A ):
__A : str = target_sizes.numpy()
__A : int = []
for idx in range(len(__A ) ):
__A : Any = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=__A )
__A : Union[str, Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__A )
else:
__A : List[str] = logits.argmax(dim=1 )
__A : Tuple = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 17 | 1 |
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
if len(lowerCAmelCase_ ) == 0:
return array
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = min(lowerCAmelCase_ ), max(lowerCAmelCase_ )
# Compute the variables
__SCREAMING_SNAKE_CASE = _max - _min + 1
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = [0] * holes_range, [0] * holes_range
# Make the sorting.
for i in array:
__SCREAMING_SNAKE_CASE = i - _min
__SCREAMING_SNAKE_CASE = i
holes_repeat[index] += 1
# Makes the array back by replacing the numbers.
__SCREAMING_SNAKE_CASE = 0
for i in range(lowerCAmelCase_ ):
while holes_repeat[i] > 0:
__SCREAMING_SNAKE_CASE = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ : List[str] = input('''Enter numbers separated by comma:\n''')
a__ : Optional[int] = [int(x) for x in user_input.split(''',''')]
print(pigeon_sort(unsorted))
| 553 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase_ :
"""simple docstring"""
@staticmethod
def UpperCAmelCase_ ( *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[Any] ) -> Optional[int]:
pass
@is_pipeline_test
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
snake_case__ : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] ) -> str:
__SCREAMING_SNAKE_CASE = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
__SCREAMING_SNAKE_CASE = [
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ) -> Any:
__SCREAMING_SNAKE_CASE = vqa_pipeline(UpperCAmelCase__ , top_k=1 )
self.assertEqual(
UpperCAmelCase__ , [
[{"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ )}],
[{"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ )}],
] , )
@require_torch
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
__SCREAMING_SNAKE_CASE = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
__SCREAMING_SNAKE_CASE = "./tests/fixtures/tests_samples/COCO/000000039769.png"
__SCREAMING_SNAKE_CASE = "How many cats are there?"
__SCREAMING_SNAKE_CASE = vqa_pipeline(image=UpperCAmelCase__ , question="How many cats are there?" , top_k=2 )
self.assertEqual(
UpperCAmelCase__ , [{"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ )}, {"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ )}] )
__SCREAMING_SNAKE_CASE = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
UpperCAmelCase__ , [{"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ )}, {"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ )}] )
@slow
@require_torch
def UpperCAmelCase_ ( self : Tuple ) -> Any:
__SCREAMING_SNAKE_CASE = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
__SCREAMING_SNAKE_CASE = "./tests/fixtures/tests_samples/COCO/000000039769.png"
__SCREAMING_SNAKE_CASE = "How many cats are there?"
__SCREAMING_SNAKE_CASE = vqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [{"score": 0.8_799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
__SCREAMING_SNAKE_CASE = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [{"score": 0.8_799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
__SCREAMING_SNAKE_CASE = vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [[{"score": 0.8_799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
pass
| 553 | 1 |