| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Implementation of the simulated annealing optimization algorithm."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score

            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds

            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
if __name__ == "__main__":
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
snake_case = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
snake_case = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
snake_case = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
snake_case = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
return (3 * x**2) - (6 * y)
snake_case = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
snake_case = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
snake_case = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
snake_case = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
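The `else` branch above is the Metropolis acceptance criterion: a worsening move with `change < 0` is accepted with probability e^(change / current_temp), so high temperatures accept almost any move and low temperatures accept almost none. A minimal standalone sketch of just that rule (hypothetical values, independent of the `SearchProblem` class):

import math
import random


def accept_move(change: float, temperature: float) -> bool:
    """Metropolis criterion: always accept improvements; accept a
    worsening move with probability exp(change / temperature)."""
    if change > 0:
        return True
    return random.random() < math.exp(change / temperature)


# A change of -5 is accepted ~95% of the time at T=100, but only ~0.7% at T=1.
print(math.exp(-5 / 100), math.exp(-5 / 1))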
| 62 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
__lowerCamelCase : int = {f"funnel-transformer/{name}": 512 for name in _model_names}
__lowerCamelCase : str = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class UpperCAmelCase ( _lowercase ):
UpperCAmelCase : Optional[int] = VOCAB_FILES_NAMES
UpperCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : Any = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase : List[str] = FunnelTokenizer
UpperCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : int = 2
def __init__(self : Tuple , A__ : Optional[int]=None , A__ : List[Any]=None , A__ : Optional[int]=True , A__ : Optional[int]="<unk>" , A__ : List[Any]="<sep>" , A__ : Optional[int]="<pad>" , A__ : str="<cls>" , A__ : Any="<mask>" , A__ : int="<s>" , A__ : Union[str, Any]="</s>" , A__ : str=True , A__ : int=True , A__ : Dict=None , A__ : Union[str, Any]="##" , **A__ : str , ) -> Union[str, Any]:
super().__init__(
A__ , tokenizer_file=A__ , do_lower_case=A__ , unk_token=A__ , sep_token=A__ , pad_token=A__ , cls_token=A__ , mask_token=A__ , bos_token=A__ , eos_token=A__ , clean_text=A__ , tokenize_chinese_chars=A__ , strip_accents=A__ , wordpieces_prefix=A__ , **A__ , )
lowercase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , A__ ) != do_lower_case
or normalizer_state.get("strip_accents" , A__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , A__ ) != tokenize_chinese_chars
):
lowercase = getattr(A__ , normalizer_state.pop("type" ) )
lowercase = do_lower_case
lowercase = strip_accents
lowercase = tokenize_chinese_chars
lowercase = normalizer_class(**A__ )
lowercase = do_lower_case
def UpperCAmelCase__ (self : List[Any] , A__ : Optional[int] , A__ : Any=None ) -> Dict:
lowercase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase__ (self : Union[str, Any] , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
lowercase = [self.sep_token_id]
lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ (self : Tuple , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]:
lowercase = self._tokenizer.model.save(A__ , name=A__ )
return tuple(A__ )
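For reference, a typical round trip with the tokenizer above might look like the sketch below (hypothetical usage; downloading the checkpoint requires network access):

from transformers import FunnelTokenizerFast

tokenizer = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
enc = tokenizer("Hello world", "Second segment")
# Funnel assigns token type id 2 to the leading <cls> token (cls_token_type_id).
print(enc["token_type_ids"][:3])  # [2, 0, 0]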
| 310 | 0 |
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """
    Power iteration: find the largest eigenvalue (in absolute value) of
    `input_matrix` and its corresponding eigenvector, starting from `vector`.
    """
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find the Rayleigh quotient
        # (faster than usual because we know the vector is normalized already).
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
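# Illustrative sketch (not part of the original module): the dominant
# eigenvalue of diag(1, 2, 5) is 5 with eigenvector e3, so power iteration
# from any start vector with a nonzero third component recovers it.
def _demo_power_iteration() -> None:
    demo_value, demo_vector = power_iteration(
        np.diag([1.0, 2.0, 5.0]), np.array([1.0, 1.0, 1.0])
    )
    assert abs(demo_value - 5.0) <= 1e-6
    assert np.linalg.norm(np.abs(demo_vector) - np.array([0.0, 0.0, 1.0])) <= 1e-6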
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique up to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()

| 713 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
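# Sketch of the step arithmetic used in training_function below
# (hypothetical numbers): with gradient accumulation, the optimizer steps
# once every `gradient_accumulation_steps` batches, so the scheduler is
# sized as (batches_per_epoch * num_epochs) // gradient_accumulation_steps,
# e.g. (230 * 2) // 4 == 115 scheduled optimizer steps.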
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)

    eval_metric = metric.compute()
    return eval_metric["accuracy"]


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizer's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        output_dir = os.path.join(args.output_dir, f"epoch_{epoch}")
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()

| 678 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak the original FLAVA weights into the transformers design.
    """
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 370 |

'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    """
    Configuration class to store the configuration of a MobileNetV1 model.
    """

    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self):
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self):
        return 1e-4
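A short usage sketch (hypothetical settings; `MobileNetV1Model` is the companion model class in transformers):

from transformers import MobileNetV1Config, MobileNetV1Model

# Width-multiplied variant: every layer's channel count is scaled by 0.75,
# but never below min_depth.
config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
model = MobileNetV1Model(config)
print(config.model_type)  # "mobilenet_v1"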
| 370 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
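The `_LazyModule` indirection means the heavy submodules are only imported on first attribute access. A sketch of the user-visible effect (assumes transformers is installed):

import transformers

# Nothing under transformers.models.blip is imported yet; this attribute
# access triggers the lazy import declared in _import_structure above.
processor_cls = transformers.BlipProcessor
print(processor_cls.__name__)  # "BlipProcessor"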
| 487 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
| 487 | 1 |
"""simple docstring"""
def count_divisors(n: int) -> int:
    """Count the number of divisors of n via its prime factorization."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Return the first triangle number with more than 500 divisors."""
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
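As a quick worked check of `count_divisors`: 28 = 2² · 7, so it has (2 + 1)(1 + 1) = 6 divisors (1, 2, 4, 7, 14, 28). A few doctest-style assertions:

assert count_divisors(28) == 6  # 28 = 2^2 * 7 -> (2 + 1) * (1 + 1)
assert count_divisors(1) == 1
assert count_divisors(97) == 2  # 97 is prime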
| 530 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _lowerCamelCase ( __a = "isbn/0140328726" ):
SCREAMING_SNAKE_CASE_ = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('''/''' ) != 1:
SCREAMING_SNAKE_CASE_ = F'{olid} is not a valid Open Library olid'
raise ValueError(__a )
return requests.get(F'https://openlibrary.org/{new_olid}.json' ).json()
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = {
'''title''': '''Title''',
'''publish_date''': '''Publish date''',
'''authors''': '''Authors''',
'''number_of_pages''': '''Number of pages:''',
'''first_sentence''': '''First sentence''',
'''isbn_10''': '''ISBN (10)''',
'''isbn_13''': '''ISBN (13)''',
}
SCREAMING_SNAKE_CASE_ = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
SCREAMING_SNAKE_CASE_ = [
get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
]
SCREAMING_SNAKE_CASE_ = data['''First sentence''']['''value''']
for key, value in data.items():
if isinstance(__a, __a ):
SCREAMING_SNAKE_CASE_ = ''', '''.join(__a )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
lowerCAmelCase__ = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
lowerCAmelCase__ = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print('\n'.join(f'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'''Sorry, there are no results for ISBN: {isbn}.''') | 626 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    """
    Configuration class to store the configuration of a Lxmert model.
    """

    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
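A brief usage sketch showing how the three encoder depths are exposed through `num_hidden_layers`:

from transformers import LxmertConfig

config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
print(config.num_hidden_layers)
# {'vision': 5, 'cross_encoder': 5, 'language': 9}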
| 248 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BlenderbotTokenizer(PreTrainedTokenizer):
    """Constructs a Blenderbot tokenizer, derived from the GPT-2 tokenizer (byte-level BPE)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
| 248 | 1 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    """
    Configuration class to store the configuration of an EnCodec model.
    """

    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    # This is a property because you might want to change chunk_length_s on the fly.
    @property
    def chunk_length(self):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # This is a property because you might want to change chunk_length_s on the fly.
    @property
    def chunk_stride(self):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
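A sketch of the derived quantities for the default 24 kHz configuration: the hop length is the product of the upsampling ratios, 8 · 5 · 4 · 2 = 320, so frame_rate = ceil(24000 / 320) = 75 and num_quantizers = 1000 · 24.0 // 750 = 32:

config = EncodecConfig()
assert config.frame_rate == 75  # ceil(24000 / (8 * 5 * 4 * 2))
assert config.num_quantizers == 32  # 1000 * 24.0 // (75 * 10)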
| 581 |
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 581 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
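A minimal sketch of a concrete subclass (hypothetical command, illustrating the abstract contract only):

class HelloCommand(BaseTransformersCLICommand):
    """Hypothetical command used only to illustrate the abstract contract."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        parser.add_argument("--name", default="world")

    def run(self):
        print("hello")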
| 146 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( __lowercase ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ : int = MgpstrTokenizer
SCREAMING_SNAKE_CASE__ : Dict = False
SCREAMING_SNAKE_CASE__ : Tuple = {}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
def snake_case_ ( self ):
"""simple docstring"""
super().setUp()
# fmt: off
__SCREAMING_SNAKE_CASE: Any = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
__SCREAMING_SNAKE_CASE: Tuple = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
__SCREAMING_SNAKE_CASE: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + '''\n''' )
def snake_case_ ( self , **_lowerCAmelCase ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def snake_case_ ( self , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Optional[Any] = '''tester'''
__SCREAMING_SNAKE_CASE: Tuple = '''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def snake_case_ ( self ):
"""simple docstring"""
pass
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Dict = self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
__SCREAMING_SNAKE_CASE: Any = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
__SCREAMING_SNAKE_CASE: int = tokenizer.encode([special_token] , add_special_tokens=_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
__SCREAMING_SNAKE_CASE: Tuple = tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
self.assertTrue(special_token not in decoded )
    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)
    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
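# Round-trip sketch for the character-level tokenizer under test (the vocab is
# whatever setUp wrote into self.tmpdirname; values are illustrative):
#
#     tok = MgpstrTokenizer.from_pretrained(tmpdir)
#     ids = tok.encode("tester", add_special_tokens=False)  # one id per character
#     assert tok.decode(ids).replace(" ", "") == "tester"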
| 146 | 1 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    """Generate and write the README.md model card for one translation direction."""
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
lowerCAmelCase__ = F"{src_lang}-{tgt_lang}"
lowerCAmelCase__ = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
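# Running this script from the repository root would emit one card per
# direction, following the model_cards_dir / "facebook" / model_name layout:
#   model_cards/facebook/wmt19-ru-en/README.md
#   model_cards/facebook/wmt19-en-ru/README.md
#   model_cards/facebook/wmt19-en-de/README.md
#   model_cards/facebook/wmt19-de-en/README.md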
| 339 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
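    # Note on the two cache checks: decoding the full target sequence in one
    # shot must match step-wise decoding against a pre-allocated key/value
    # cache. Only the last position's first five logits are compared, with a
    # 1e-3 tolerance, since accumulation order differs between the two paths.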
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
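# Toy illustration of the default masking above (values are made up):
#   input_ids = [[5, 7, 1, 1]] with pad_token_id=1 -> attention_mask = [[1, 1, 0, 0]]
# while the decoder mask always keeps position 0 (the decoder start token)
# visible and masks padding in the remaining positions.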
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
        tgt_text = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 339 | 1 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)
    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
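    # The 3x3 corner-slice + golden-values pattern used throughout these tests
    # pins the pipeline to a known-good render: comparing only nine pixels of
    # the last channel keeps fixtures small while still catching numerical
    # drift, and the 1e-2 tolerance absorbs benign cross-platform variation.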
    def test_safe_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 705 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    r"""
    Configuration class for a ResNet model. Instantiating a configuration with
    the default arguments yields a configuration close to microsoft/resnet-50.
    """

    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]
    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
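# Construction sketch (hyper-parameters here are illustrative, not the
# defaults from this file):
#
#     config = ResNetConfig(layer_type="basic", depths=[2, 2, 2, 2], out_features=["stage4"])
#     config.stage_names  # -> ['stem', 'stage1', 'stage2', 'stage3', 'stage4']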
class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
| 698 | 0 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class Wav2Vec2Processor(ProcessorMixin):
    r"""
    Wraps a Wav2Vec2 feature extractor and a CTC tokenizer into a single
    processor that prepares audio inputs and (optionally) text labels.
    """

    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)
    def __call__(self, *args, **kwargs):
        """
        Forwards `audio` to the feature extractor and `text` to the tokenizer; when used
        inside the `as_target_processor` context manager, forwards everything to the
        current processor instead.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def pad(self, *args, **kwargs):
        """
        Pads `input_features` with the feature extractor and `labels` with the tokenizer;
        when used inside the `as_target_processor` context manager, pads with the current
        processor instead.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
@contextmanager
    def as_target_processor(self):
        """Temporarily sets the tokenizer as the active processor for target text (deprecated)."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
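# Usage sketch (the checkpoint name and inputs are illustrative):
#
#     processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#     batch = processor(audio=raw_audio, sampling_rate=16_000, text=transcript,
#                       return_tensors="pt")
#     # `batch` holds the audio features plus `labels` (the tokenized
#     # transcript ids), ready for CTC fine-tuning.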
| 395 |
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """
    Calculates the expected number of distinct colours among `num_picked` balls
    drawn at random from an urn of 70 balls (10 of each of 7 colours).
    """
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"
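# Why this works: by linearity of expectation,
#   E[#colours seen] = NUM_COLOURS * P(a given colour appears at least once)
# and for any one colour, P(absent) = C(60, 20) / C(70, 20) when 20 of the 70
# balls are drawn, since all 20 must then come from the other 60 balls.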
if __name__ == "__main__":
print(solution(20))
| 395 | 1 |
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
    load_pytorch_checkpoint_in_tf2_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
'''bart''': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''bert''': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-base-cased-finetuned-mrpc''': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''dpr''': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''gpt2''': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlnet''': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm''': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm-roberta''': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''transfo-xl''': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''openai-gpt''': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''roberta''': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''layoutlm''': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''roberta-large-mnli''': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''camembert''': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''flaubert''': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert''': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert-base-distilled-squad''': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert''': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert-visual-feature-encoder''': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''ctrl''': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''albert''': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''t5''': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''electra''': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''wav2vec2''': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
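# The optional comparison pass above re-runs the original PyTorch model on its
# dummy inputs and requires the converted TensorFlow graph to agree to within
# 2e-2 absolute difference before the .h5 weights are written out.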
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_config_map.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_config_map:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
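    # Example invocation (a sketch; the script filename is assumed, the flags
    # are the ones declared above):
    #
    #   python convert_pytorch_checkpoint_to_tf2.py \
    #       --model_type bert \
    #       --pytorch_checkpoint_path bert-base-uncased \
    #       --config_file bert_config.json \
    #       --tf_dump_path ./tf_dumps \
    #       --compare_with_pt_model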
| 711 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
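    # Pegasus reserves <mask_1> for the gap-sentence (whole-sentence) mask used
    # by the GSG pretraining objective and <mask_2> for the word-level MLM
    # mask, which is why both tokens appear in the expected id sequence above.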
    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
@slow
def __lowercase( self : Any )-> str:
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE__ : Optional[int] = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        # NOTE: the expected-encoding dict above keeps its obfuscated name from this dump.
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE__, model_name="google/bigbird-pegasus-large-arxiv", revision="ba85d0851d708441f91440d509690f1ab6353415", )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    # TokenizerTesterMixin and SAMPLE_VOCAB come from the (truncated) module header.
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self) -> None:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self) -> None:
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self) -> None:
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt")
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self) -> None:
        raw_input_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(raw_input_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1], )
| 636 | 0 |
'''simple docstring'''
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
"cubicmeter": from_to(1, 1),
"litre": from_to(0.001, 10_00),
"kilolitre": from_to(1, 1),
"gallon": from_to(0.0_0454, 264.172),
"cubicyard": from_to(0.7_6455, 1.3_0795),
"cubicfoot": from_to(0.028, 35.3147),
"cup": from_to(0.0_0023_6588, 4226.75),
}
def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert ``value`` between the supported volume units via cubic meters."""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
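# A minimal usage sketch (not part of the original module): every conversion
# goes through cubic meters, multiplying by the source unit's ``from_`` factor
# and then by the target unit's ``to`` factor.
# 4 L -> m^3 -> gal: 4 * 0.001 * 264.172 ~= 1.0567 gallons.
assert abs(volume_conversion(4, "litre", "gallon") - 1.056688) < 1e-3
# Converting a unit to itself is the identity (up to float error).
assert abs(volume_conversion(2.5, "cubicmeter", "cubicmeter") - 2.5) < 1e-9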
| 18 | '''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
    main()
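# A pure-function sketch of the same timing policy (hypothetical helper, not
# part of the script above), ignoring the label exemptions; it makes the
# 23-day notification window and the 7-day close window easy to unit-test.
def stale_action(days_since_update: int, days_since_creation: int, bot_commented_last: bool) -> str:
    if days_since_creation < 30:
        return "none"
    if bot_commented_last and days_since_update > 7:
        return "close"  # close 7 days after the stalebot notification
    if days_since_update > 23:
        return "mark_stale"  # post the notification after 23 days of inactivity
    return "none"

assert stale_action(24, 40, bot_commented_last=False) == "mark_stale"
assert stale_action(8, 40, bot_commented_last=True) == "close"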
| 660 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Resize so the image's shortest edge matches ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
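# A minimal usage sketch (assumes transformers is installed and the Hugging
# Face Hub is reachable); shows the resize -> center-crop -> rescale ->
# normalize pipeline on a dummy HxWxC uint8 image.
if __name__ == "__main__":
    processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
    dummy_image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
    pixel_values = processor(images=dummy_image, return_tensors="np")["pixel_values"]
    print(pixel_values.shape)  # (1, 3, 224, 224)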
| 499 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=1e-6, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)


class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)


class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config: InstructBlipVisionConfig, qformer_config: InstructBlipQFormerConfig, text_config: PretrainedConfig, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
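# A minimal composition sketch using the classes above (OPTConfig is the
# default text backbone; assumes a transformers version that ships it).
if __name__ == "__main__":
    from transformers import OPTConfig

    config = InstructBlipConfig.from_vision_qformer_text_configs(
        InstructBlipVisionConfig(), InstructBlipQFormerConfig(), OPTConfig())
    # The Q-Former cross-attends into the vision tower, so its encoder width is
    # tied to the vision hidden size at construction time:
    assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size == 1408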
| 499 | 1 |
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit using a prime sieve."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
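# Hand-checkable example: phi(2..8) = 1, 2, 2, 4, 2, 6, 4, which sums to 21,
# i.e. there are 21 reduced proper fractions with denominator <= 8.
assert solution(8) == 21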
| 1 |
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "AB") to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
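# Worked base-26 examples: "A" -> 1, "AB" -> 1*26 + 2 = 28, "ZZ" -> 26*26 + 26 = 702.
assert excel_title_to_column("A") == 1
assert excel_title_to_column("AB") == 28
assert excel_title_to_column("ZZ") == 702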
| 1 | 1 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self) -> None:
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self) -> None:
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self) -> None:
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self) -> None:
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 712 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """Set each passed object to None and empty the accelerator cache."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """Return True if the exception looks like an out-of-memory error."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """Decorator that retries ``function`` with a halved batch size on OOM."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)
    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`")
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise
    return decorator
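# A usage sketch of the decorator (``train`` and its arguments are
# hypothetical): the wrapper injects the first workable batch size as the
# first positional argument and halves it on every OOM (128 -> 64 -> 32 ...).
@find_executable_batch_size(starting_batch_size=128)
def train(batch_size, model, dataloader):
    ...  # would raise a CUDA OOM RuntimeError when batch_size is too large

# Note the call site does NOT pass batch_size:
#     train(model, dataloader)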
| 527 | 0 |
'''simple docstring'''
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort ``numbers`` in place by repeatedly exchanging out-of-order pairs."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
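# The nested loops compare every pair once, so the sort is O(n^2) regardless
# of input order; a couple of quick checks:
assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert exchange_sort([-1, 0, 7, 2]) == [-1, 0, 2, 7]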
| 48 | import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"roberta-base": 5_1_2,
"roberta-large": 5_1_2,
"roberta-large-mnli": 5_1_2,
"distilroberta-base": 5_1_2,
"roberta-base-openai-detector": 5_1_2,
"roberta-large-openai-detector": 5_1_2,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
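# A minimal usage sketch (downloads the roberta-base vocabulary from the Hub).
if __name__ == "__main__":
    tok = RobertaTokenizerFast.from_pretrained("roberta-base")
    enc = tok("Hello <mask>!")
    print(enc.input_ids)  # wrapped as <s> ... </s>, i.e. starts with 0 and ends with 2
    print(tok.mask_token)  # "<mask>" is lstripped, so it absorbs the space before it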
| 666 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
# NOTE: the six concrete ONNX pipeline class names were lost during the
# obfuscation of this dump; distinct placeholder names are used so the stubs
# no longer shadow one another. Each stub raises an ImportError (via
# `requires_backends`) unless torch, transformers and onnx are installed.
class _OnnxDummyA(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class _OnnxDummyB(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class _OnnxDummyC(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class _OnnxDummyD(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class _OnnxDummyE(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class _OnnxDummyF(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
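# A sketch of roughly how the ``DummyObject`` metaclass behaves (simplified;
# see the real implementation in the library's utils): any public attribute
# access on the class itself raises the backend ImportError via
# ``requires_backends``, instead of an opaque AttributeError.
class _DummyObjectSketch(type):
    def __getattr__(cls, key):
        if key.startswith("_"):
            raise AttributeError(key)
        requires_backends(cls, cls._backends)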
| 135 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def calculate_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Given any two of resistance, reactance and impedance (pass the unknown
    one as 0), compute the missing quantity from |Z|^2 = R^2 + X^2."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
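# Worked example with the classic 3-4-5 values: |Z| = sqrt(R^2 + X^2)
# = sqrt(3^2 + 4^2) = 5, and solving for R gives sqrt(5^2 - 4^2) = 3.
assert calculate_impedance(3, 4, 0) == {"impedance": 5.0}
assert calculate_impedance(0, 4, 5) == {"resistance": 3.0}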
| 135 | 1 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'")
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
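# A usage sketch: this builder is what backs ``load_dataset("parquet", ...)``.
# The file paths below are hypothetical.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("parquet", data_files={"train": "data/train.parquet"})
    print(ds["train"].features)  # inferred from the Arrow schema when not provided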
| 285 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING
def _get_default_logging_level():
    """Return the default level, overridable with the DATASETS_VERBOSITY env variable."""
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }")
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
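# A usage sketch of the public helpers above, self-contained in this module.
if __name__ == "__main__":
    set_verbosity_info()  # or: export DATASETS_VERBOSITY=info
    get_logger(__name__).info("visible at INFO level")
    disable_progress_bar()  # tqdm(...) now returns the no-op EmptyTqdm
    assert not is_progress_bar_enabled()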
| 285 | 1 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    """Map a ParlAI parameter name to the corresponding HF Blenderbot name."""
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/paste/tweak a ParlAI checkpoint's weights into the HF structure."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use")
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
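# A usage sketch with hypothetical paths, equivalent to the CLI above:
#     python <this script> --src_path blenderbot-model.bin \
#         --save_dir hf_blenderbot --hf_config_json blenderbot-3b-config.json
# convert_parlai_checkpoint(
#     checkpoint_path="blenderbot-model.bin",
#     pytorch_dump_folder_path="hf_blenderbot",
#     config_json_path="blenderbot-3b-config.json",
# )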
| 713 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count the integer-sided right triangles with each perimeter <= max_perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(n: int = 1000) -> int:
    """Return the perimeter <= n with the most right-triangle solutions."""
    triplets = pythagorean_triple(n)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
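# Worked example: with max_perimeter=30 the integer right triangles are
# (3, 4, 5) -> 12, (6, 8, 10) -> 24 and (5, 12, 13) -> 30, one per perimeter.
assert pythagorean_triple(30) == Counter({12: 1, 24: 1, 30: 1})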
| 391 | 0 |
"""simple docstring"""
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(nums: list[int], max_sum: int, num_index: int, path: list[int], result: list[list[int]], remaining_nums_sum: int, ) -> None:
    # Prune when the partial sum overshoots, or when even taking everything
    # that remains cannot reach max_sum.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index], )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
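# For nums = [3, 34, 4, 12, 5, 2] and max_sum = 9, the pruned depth-first
# search yields exactly two subsets, in list order:
assert result == [[3, 4, 2], [4, 5]]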
| 353 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=10, hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4_000, return_attention_mask=False, do_normalize=True, ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class __magic_name__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
UpperCamelCase_ = WhisperFeatureExtractor if is_speech_available() else None
def lowercase_ ( self ) -> Dict:
"""simple docstring"""
_lowercase: int = WhisperFeatureExtractionTester(self )
def lowercase_ ( self ) -> Tuple:
"""simple docstring"""
_lowercase: Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase: Any = feat_extract_first.save_pretrained(A_ )[0]
check_json_file_has_correct_format(A_ )
_lowercase: str = self.feature_extraction_class.from_pretrained(A_ )
_lowercase: Dict = feat_extract_first.to_dict()
_lowercase: List[str] = feat_extract_second.to_dict()
_lowercase: List[str] = feat_extract_first.mel_filters
_lowercase: str = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def lowercase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowercase: Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase: Union[str, Any] = os.path.join(A_ , '''feat_extract.json''' )
feat_extract_first.to_json_file(A_ )
_lowercase: Tuple = self.feature_extraction_class.from_json_file(A_ )
_lowercase: int = feat_extract_first.to_dict()
_lowercase: Optional[int] = feat_extract_second.to_dict()
_lowercase: Tuple = feat_extract_first.mel_filters
_lowercase: Optional[int] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
    def test_call( self ):
        """simple docstring"""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs , padding='''max_length''' , return_tensors='''np''' ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1E-3 ) )
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors='''np''' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors='''np''' ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors='''np''' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors='''np''' ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
        # Test truncation required
        speech_inputs = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input ) for speech_input in speech_inputs_truncated]
        encoded_sequences_1 = feature_extractor(np_speech_inputs , return_tensors='''np''' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated , return_tensors='''np''' ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
    def test_double_precision_pad( self ):
        """simple docstring"""
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 , 32 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
    def _load_datasamples( self , num_samples ):
        """simple docstring"""
        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
    def test_integration( self ):
        """simple docstring"""
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.11_93, -0.09_46, -0.10_98, -0.01_96, 0.02_25, -0.06_90, -0.17_36, 0.09_51,
                0.09_71, -0.08_17, -0.07_02, 0.01_62, 0.02_60, 0.00_17, -0.01_92, -0.16_78,
                0.07_09, -0.18_67, -0.06_55, -0.02_74, -0.02_34, -0.18_84, -0.05_16, -0.05_54,
                -0.02_74, -0.14_25, -0.14_23, 0.08_37, 0.03_77, -0.08_54
            ] )
        # fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech , return_tensors='''pt''' ).input_features
        self.assertEqual(input_features.shape , (1, 80, 3000) )
        self.assertTrue(torch.allclose(input_features[0, 0, :30] , EXPECTED_INPUT_FEATURES , atol=1E-4 ) )
    def test_zero_mean_unit_variance_normalization_trunc_np_max( self ):
        """simple docstring"""
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        audio = self._load_datasamples(1 )[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=None )[0]
        self.assertTrue(np.all(np.mean(audio ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(audio ) - 1 ) < 1E-3 ) )
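# Usage sketch (added for illustration; not part of the original test file).
# It computes the same (1, 80, 3000) log-mel features the integration test
# checks, on 30 s of silence. Assumes `transformers` and `numpy` are installed.
if __name__ == "__main__":
    import numpy as np
    from transformers import WhisperFeatureExtractor

    fe = WhisperFeatureExtractor()
    feats = fe(np.zeros(480_000), sampling_rate=16_000, return_tensors="np").input_features
    print(feats.shape)  # (1, 80, 3000): 80 mel bins over 3000 frames (30 s)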
| 353 | 1 |
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """
    Project Euler 174: count the tile totals t <= t_limit that can form
    between 1 and n_limit distinct hollow square laminae.
    """
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
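# Cross-check sketch (added for illustration; not part of the original
# solution): enumerate every lamina directly for a small tile budget and
# compare against solution(). The helper name is hypothetical.
def brute_force_solution(t_limit: int, n_limit: int = 10) -> int:
    counts = defaultdict(int)
    outer = 3
    while 4 * outer - 4 <= t_limit:  # thinnest possible lamina for this outer width
        for hole in range(outer - 2, 0, -2):  # hole keeps the parity of outer
            tiles = outer * outer - hole * hole
            if tiles <= t_limit:
                counts[tiles] += 1
        outer += 1
    return sum(1 for n in counts.values() if 1 <= n <= n_limit)


assert brute_force_solution(1_000) == solution(1_000)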
if __name__ == "__main__":
print(f"{solution() = }")
| 454 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
class BartTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = BartTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space' , add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])
            changes_to_apply = False
            if state.get('add_prefix_space' , add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' , trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer , tokenizer_component , new_value)
    @property
    def mask_token( self ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
    @mask_token.setter
    def mask_token( self , _lowerCAmelCase ):
        '''simple docstring'''
        __lowercase =AddedToken(_lowerCAmelCase , lstrip=True , rstrip=False) if isinstance(_lowerCAmelCase , str) else _lowerCAmelCase
        self._mask_token =__lowercase
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words' , False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                'to use it with pretokenized inputs.')
        return super()._batch_encode_plus(*args , **kwargs)
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words' , False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                'to use it with pretokenized inputs.')
        return super()._encode_plus(*args , **kwargs)
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        output =[self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep =[self.sep_token_id]
        cls =[self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
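# Usage sketch (added for illustration; not part of the original module).
# Round-trips a sentence through the fast tokenizer defined above; assumes
# network access to the "facebook/bart-base" checkpoint on the Hub.
if __name__ == "__main__":
    tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
    enc = tok("Hello world")
    print(enc.input_ids)  # wrapped as <s> ... </s> (ids 0 and 2)
    print(tok.decode(enc.input_ids, skip_special_tokens=True))  # "Hello world"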
| 454 | 1 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Audio:
    '''simple docstring'''
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
    _type: str = field(default='''Audio''' , init=False , repr=False )
def __call__( self ):
return self.pa_type
    def encode_example( self , value ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
        if isinstance(value , str ):
return {"bytes": None, "path": value}
        elif isinstance(value , bytes ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
if value.get("""bytes""" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["""bytes"""] , dtype=np.int16 ).astype(np.float32 ) / 3_2_7_6_7
                else:
                    bytes_value = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.float32 ) / 3_2_7_6_7
                buffer = BytesIO(bytes() )
                sf.write(buffer , bytes_value , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
    def decode_example( self , value , token_per_repo_id = None ):
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
        path, file = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}." )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
        audio_format = xsplitext(path )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("""::""" )[-1]
            try:
                repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL )["""repo_id"""]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path , """rb""" , use_auth_token=use_auth_token ) as f:
                array, sampling_rate = sf.read(f )
else:
            array, sampling_rate = sf.read(file )
        array = array.T
        if self.mono:
            array = librosa.to_mono(array )
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array , orig_sr=sampling_rate , target_sr=self.sampling_rate )
            sampling_rate = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten( self ):
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
    def cast_storage( self , storage ):
        if pa.types.is_string(storage.type ):
            bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
            storage = pa.array([Audio().encode_example(x ) if x is not None else None for x in storage.to_pylist()] )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("""bytes""" ) >= 0:
                bytes_array = storage.field("""bytes""" )
            else:
                bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            if storage.type.get_field_index("""path""" ) >= 0:
                path_array = storage.field("""path""" )
            else:
                path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
        return array_cast(storage , self.pa_type )
    def embed_storage( self , storage ):
        @no_op_if_value_is_null
        def path_to_bytes(path ):
            with xopen(path , """rb""" ) as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
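# Usage sketch (added for illustration; not part of the original module).
# Writes a 1-second tone to disk, then casts a dataset column to Audio so the
# file is decoded lazily on access. Assumes `datasets` and `soundfile` exist;
# `HubAudio` is just an alias to avoid clashing with the class defined above.
if __name__ == "__main__":
    import numpy as np
    import soundfile as sf
    from datasets import Audio as HubAudio, Dataset

    sf.write("tone.wav", np.sin(2 * np.pi * 440 * np.linspace(0, 1, 16_000)), 16_000)
    ds = Dataset.from_dict({"audio": ["tone.wav"]}).cast_column("audio", HubAudio(sampling_rate=16_000))
    sample = ds[0]["audio"]
    print(sample["sampling_rate"], sample["array"].shape)  # 16000 (16000,)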
| 38 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class BitConfig( BackboneConfigMixin , PretrainedConfig ):
    '''simple docstring'''
    model_type = '''bit'''
    layer_types = ['''preactivation''', '''bottleneck''']
    supported_padding = ['''SAME''', '''VALID''']
    def __init__( self , num_channels=3 , embedding_size=6_4 , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , depths=[3, 4, 6, 3] , layer_type="preactivation" , hidden_act="relu" , global_padding=None , num_groups=3_2 , drop_path_rate=0.0 , embedding_dynamic_padding=False , output_stride=3_2 , width_factor=1 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported" )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
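# Usage sketch (added for illustration; not part of the original module).
# Instantiates the config above and inspects the backbone stage mapping.
if __name__ == "__main__":
    cfg = BitConfig(depths=[2, 2, 4, 2], out_features=["stage2", "stage4"])
    print(cfg.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
    print(cfg.out_features)  # ['stage2', 'stage4']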
| 38 | 1 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 1_2_8_0_2_2
FR_CODE = 1_2_8_0_2_8
@require_sentencepiece
class M2M100TokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self ):
        super().setUp()
        vocab = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES['vocab_file'] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES['spm_file'] )
        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer(self , **kwargs ):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname , **kwargs )
def _lowercase ( self , _UpperCAmelCase ) -> int:
return (
"This is a test",
"This is a test",
)
def _lowercase ( self ) -> Dict:
snake_case__ ='</s>'
snake_case__ =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def _lowercase ( self ) -> Optional[Any]:
snake_case__ =self.get_tokenizer()
snake_case__ =list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '</s>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '<s>' )
self.assertEqual(len(_UpperCAmelCase ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('Skip this test while all models are still to be uploaded.' )
def _lowercase ( self ) -> int:
pass
def _lowercase ( self ) -> Any:
snake_case__ =self.get_tokenizer()
snake_case__ =tokenizer.tokenize('This is a test' )
self.assertListEqual(_UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [2, 3, 4, 5, 6] , )
snake_case__ =tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(_UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
snake_case__ =tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , 'This is a test' )
@slow
def _lowercase ( self ) -> Optional[int]:
# fmt: off
snake_case__ ={'input_ids': [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='facebook/m2m100_418M' , revision='c168bae485c864188cf9aa0e4108b0b6934dc91e' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest( unittest.TestCase ):
    checkpoint_name = '''facebook/m2m100_418M'''
    src_text = [
        '''In my opinion, there are two levels of response from the French government.''',
        '''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
    ]
    tgt_text = [
        '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
        '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2]
    @classmethod
    def setUpClass(cls ):
        cls.tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='en' , tgt_lang='fr' )
        cls.pad_token_id = 1
        return cls
def _lowercase ( self ) -> Any:
self.assertEqual(self.tokenizer.get_lang_id('ar' ) , 12_8006 )
self.assertEqual(self.tokenizer.get_lang_id('en' ) , 12_8022 )
self.assertEqual(self.tokenizer.get_lang_id('ro' ) , 12_8076 )
self.assertEqual(self.tokenizer.get_lang_id('mr' ) , 12_8063 )
def _lowercase ( self ) -> Dict:
snake_case__ =self.tokenizer.get_vocab()
self.assertEqual(len(_UpperCAmelCase ) , self.tokenizer.vocab_size )
self.assertEqual(vocab['<unk>'] , 3 )
self.assertIn(self.tokenizer.get_lang_token('en' ) , _UpperCAmelCase )
    def test_tokenizer_batch_encode_plus(self ):
        self.tokenizer.src_lang = 'en'
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
def _lowercase ( self ) -> str:
self.assertIn(_UpperCAmelCase , self.tokenizer.all_special_ids )
# fmt: off
snake_case__ =[FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2]
# fmt: on
snake_case__ =self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
snake_case__ =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , _UpperCAmelCase )
def _lowercase ( self ) -> List[Any]:
snake_case__ =tempfile.mkdtemp()
snake_case__ =self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(_UpperCAmelCase )
snake_case__ =MaMaaaTokenizer.from_pretrained(_UpperCAmelCase )
self.assertDictEqual(new_tok.lang_token_to_id , _UpperCAmelCase )
    @require_torch
    def test_batch_fairseq_parity(self ):
        self.tokenizer.src_lang = 'en'
        self.tokenizer.tgt_lang = 'fr'
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors='pt' )
        batch['decoder_input_ids'] = shift_tokens_right(
            batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
        for k in batch:
            batch[k] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def _lowercase ( self ) -> Tuple:
snake_case__ ='mr'
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('mr' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
snake_case__ ='zh'
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('zh' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def _lowercase ( self ) -> List[Any]:
snake_case__ ='mr'
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('mr' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
snake_case__ ='zh'
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('zh' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def _lowercase ( self ) -> Optional[Any]:
snake_case__ =self.tokenizer._build_translation_inputs('A test' , return_tensors='pt' , src_lang='en' , tgt_lang='ar' )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , {
# en_XX, A, test, EOS
'input_ids': [[12_8022, 58, 4183, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 12_8006,
} , )
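# Usage sketch (added for illustration; not part of the original test file).
# End-to-end translation with the public M2M100 classes these tests exercise;
# assumes network access to the "facebook/m2m100_418M" checkpoint.
if __name__ == "__main__":
    from transformers import M2M100ForConditionalGeneration

    tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
    model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
    batch = tok("Life is like a box of chocolates.", return_tensors="pt")
    out = model.generate(**batch, forced_bos_token_id=tok.get_lang_id("fr"))
    print(tok.batch_decode(out, skip_special_tokens=True))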
| 581 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=2 , _UpperCAmelCase=99 , _UpperCAmelCase=0 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase="last" , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=0 , ) -> Optional[int]:
snake_case__ =parent
snake_case__ =batch_size
snake_case__ =seq_length
snake_case__ =is_training
snake_case__ =use_input_lengths
snake_case__ =use_token_type_ids
snake_case__ =use_labels
snake_case__ =gelu_activation
snake_case__ =sinusoidal_embeddings
snake_case__ =causal
snake_case__ =asm
snake_case__ =n_langs
snake_case__ =vocab_size
snake_case__ =n_special
snake_case__ =hidden_size
snake_case__ =num_hidden_layers
snake_case__ =num_attention_heads
snake_case__ =hidden_dropout_prob
snake_case__ =attention_probs_dropout_prob
snake_case__ =max_position_embeddings
snake_case__ =type_sequence_label_size
snake_case__ =initializer_range
snake_case__ =num_labels
snake_case__ =num_choices
snake_case__ =summary_type
snake_case__ =use_proj
snake_case__ =scope
snake_case__ =bos_token_id
def _lowercase ( self ) -> Any:
snake_case__ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ =random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ =None
if self.use_input_lengths:
snake_case__ =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
snake_case__ =None
if self.use_token_type_ids:
snake_case__ =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
snake_case__ =None
snake_case__ =None
snake_case__ =None
if self.use_labels:
snake_case__ =ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ =ids_tensor([self.batch_size] , 2 ).float()
snake_case__ =ids_tensor([self.batch_size] , self.num_choices )
snake_case__ =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowercase ( self ) -> Union[str, Any]:
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> Tuple:
snake_case__ =XLMModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case__ =model(_UpperCAmelCase , lengths=_UpperCAmelCase , langs=_UpperCAmelCase )
snake_case__ =model(_UpperCAmelCase , langs=_UpperCAmelCase )
snake_case__ =model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> str:
snake_case__ =XLMWithLMHeadModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case__ =model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> str:
snake_case__ =XLMForQuestionAnsweringSimple(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case__ =model(_UpperCAmelCase )
snake_case__ =model(_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
snake_case__ =outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> Dict:
snake_case__ =XLMForQuestionAnswering(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case__ =model(_UpperCAmelCase )
snake_case__ =model(
_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , cls_index=_UpperCAmelCase , is_impossible=_UpperCAmelCase , p_mask=_UpperCAmelCase , )
snake_case__ =model(
_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , cls_index=_UpperCAmelCase , is_impossible=_UpperCAmelCase , )
((snake_case__) , ) =result_with_labels.to_tuple()
snake_case__ =model(_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
((snake_case__) , ) =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> Any:
snake_case__ =XLMForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case__ =model(_UpperCAmelCase )
snake_case__ =model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> Optional[Any]:
snake_case__ =self.num_labels
snake_case__ =XLMForTokenClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case__ =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> int:
snake_case__ =self.num_choices
snake_case__ =XLMForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case__ =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ =model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self ) -> str:
snake_case__ =self.prepare_config_and_inputs()
(
(
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) ,
) =config_and_inputs
snake_case__ ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class XLMModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict['start_positions'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict['end_positions'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        self.model_tester = XLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=XLMConfig , emb_dim=37 )
def _lowercase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _lowercase ( self ) -> int:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*_UpperCAmelCase )
def _lowercase ( self ) -> Dict:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*_UpperCAmelCase )
def _lowercase ( self ) -> Optional[int]:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*_UpperCAmelCase )
def _lowercase ( self ) -> Optional[int]:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*_UpperCAmelCase )
def _lowercase ( self ) -> str:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*_UpperCAmelCase )
def _lowercase ( self ) -> Optional[int]:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*_UpperCAmelCase )
def _lowercase ( self ) -> str:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*_UpperCAmelCase )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=1 ) -> Dict:
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(
[isinstance(_UpperCAmelCase , _UpperCAmelCase ) for iter_attentions in attentions] , [True] * len(_UpperCAmelCase ) )
self.assertEqual(len(_UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(_UpperCAmelCase ):
# adds PAD dummy token
snake_case__ =min_length + idx + 1
snake_case__ =min_length + idx + 1
snake_case__ =(
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_UpperCAmelCase ) )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=1 ) -> int:
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(
[isinstance(_UpperCAmelCase , _UpperCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(_UpperCAmelCase ) , )
self.assertEqual(len(_UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(_UpperCAmelCase ):
# adds PAD dummy token
snake_case__ =min_length + idx + 1
snake_case__ =(batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_UpperCAmelCase ) , )
pass
@slow
def _lowercase ( self ) -> Dict:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ =XLMModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@require_torch
class XLMModelLanguageGenerationTest( unittest.TestCase ):
@slow
    def test_lm_generate_xlm_mlm_en_2048( self ):
        model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
        model.to(torch_device )
        input_ids = torch.tensor([[14, 447]] , dtype=torch.long , device=torch_device )  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].cpu().numpy().tolist() , expected_output_ids )
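# Usage sketch (added for illustration; not part of the original test file).
# Mirrors the slow generation test above through the public API: greedy
# decoding from the same "the president" prompt. Assumes Hub network access.
if __name__ == "__main__":
    from transformers import XLMTokenizer

    tok = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
    model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
    inputs = tok("the president", return_tensors="pt")
    out = model.generate(**inputs, do_sample=False, max_length=20)
    print(tok.decode(out[0]))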
| 581 | 1 |
def is_balanced(s: str ) -> bool:
    '''simple docstring'''
    stack = []
    open_brackets = set({"(", "[", "{"} )
    closed_brackets = set({")", "]", "}"} )
    open_to_closed = {"{": "}", "[": "]", "(": ")"}
    for i in range(len(s ) ):
        if s[i] in open_brackets:
            stack.append(s[i] )
        elif s[i] in closed_brackets and (
            len(stack ) == 0 or (len(stack ) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack ) == 0
def main() -> None:
    '''simple docstring'''
    s = input("Enter sequence of brackets: " )
    if is_balanced(s ):
        print(s , "is balanced" )
    else:
        print(s , "is not balanced" )
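# Quick self-checks (added for illustration; not part of the original script).
assert is_balanced("([]{})" )
assert not is_balanced("([)]" )
assert not is_balanced("(((" )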
if __name__ == "__main__":
main()
| 461 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester:
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def _a(self : int ) -> Dict:
_lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : Tuple = None
if self.use_input_mask:
_lowercase : int = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : Tuple = None
_lowercase : Union[str, Any] = None
_lowercase : Tuple = None
if self.use_labels:
_lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : str = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : Dict = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a(self : Any ) -> Any:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _a(self : int , snake_case : Optional[Any] , snake_case : Tuple , snake_case : List[str] , snake_case : Tuple , snake_case : Any , snake_case : Dict ) -> Optional[int]:
_lowercase : Optional[int] = DistilBertModel(config=snake_case )
model.to(snake_case )
model.eval()
_lowercase : List[Any] = model(snake_case , snake_case )
_lowercase : int = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a(self : int , snake_case : Optional[Any] , snake_case : Tuple , snake_case : Optional[Any] , snake_case : List[str] , snake_case : Optional[Any] , snake_case : Optional[Any] ) -> Dict:
_lowercase : Optional[int] = DistilBertForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
_lowercase : int = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a(self : Tuple , snake_case : List[str] , snake_case : Any , snake_case : List[str] , snake_case : Dict , snake_case : str , snake_case : str ) -> Any:
_lowercase : Dict = DistilBertForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
_lowercase : List[str] = model(
snake_case , attention_mask=snake_case , start_positions=snake_case , end_positions=snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a(self : Union[str, Any] , snake_case : str , snake_case : Dict , snake_case : Dict , snake_case : Union[str, Any] , snake_case : Optional[int] , snake_case : Dict ) -> Dict:
_lowercase : str = self.num_labels
_lowercase : Any = DistilBertForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
_lowercase : Optional[Any] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a(self : int , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : int , snake_case : Optional[int] , snake_case : Optional[int] , snake_case : str ) -> str:
_lowercase : str = self.num_labels
_lowercase : List[str] = DistilBertForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
_lowercase : str = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a(self : List[str] , snake_case : int , snake_case : str , snake_case : Union[str, Any] , snake_case : Dict , snake_case : int , snake_case : Union[str, Any] ) -> Optional[Any]:
_lowercase : str = self.num_choices
_lowercase : Dict = DistilBertForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
_lowercase : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowercase : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowercase : Tuple = model(
snake_case , attention_mask=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a(self : List[str] ) -> List[str]:
_lowercase : Union[str, Any] = self.prepare_config_and_inputs()
((_lowercase) , (_lowercase) , (_lowercase) , (_lowercase) , (_lowercase) , (_lowercase)) : Union[str, Any] = config_and_inputs
_lowercase : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class DistilBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp(self ):
        self.model_tester = DistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )
def _a(self : int ) -> List[str]:
self.config_tester.run_common_tests()
    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
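        # The 3x3 slice comparison with atol=1e-4 pins down concrete hidden-state
        # values while tolerating small numerical differences across hardware.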
| 461 | 1 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
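# Quick illustration (hypothetical call, not part of the original script):
# get_swin_config("swin-base-simmim-window6-192") returns a SwinConfig with
# window_size=6, embed_dim=128, depths=(2, 2, 18, 2) and num_heads=(4, 8, 16, 32).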
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"
    if "decoder" in name:
        pass
    else:
        name = "swin." + name
    return name
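# Example of the mapping above (illustrative):
#   rename_key("encoder.patch_embed.proj.weight")
#     -> "swin.embeddings.patch_embeddings.projection.weight"
# Decoder keys pass through unchanged; everything else gets the "swin." prefix.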
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
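# Note on the qkv split above: the fused weight has shape
# (3 * all_head_size, hidden_size), with consecutive row blocks holding query,
# key and value, so val[:dim], val[dim : dim * 2] and val[-dim:] recover them;
# biases are split the same way along their single dimension.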
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Verify the converted model on a sample image.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)
    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="swin-base-simmim-window6-192",
type=str,
choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
help="Name of the Swin SimMIM model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
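    # Example invocation (script name and paths are placeholders):
    #   python convert_swin_simmim_to_pytorch.py \
    #       --model_name swin-base-simmim-window6-192 \
    #       --checkpoint_path /path/to/simmim_pretrain__swin_base__img192_window6__100ep.pth \
    #       --pytorch_dump_folder_path ./swin-simmim-converted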
| 367 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
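# Sketch of the returned values (illustrative): up to three tf.data.Dataset
# objects yielding (features_dict, label_id) pairs, plus the label2id mapping,
# e.g. {"negative": 0, "positive": 1} for a binary-sentiment CSV file.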
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)
return results
if __name__ == "__main__":
main()
| 367 | 1 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
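# Illustration (assumed usage): list_field(default=[8]) is equivalent to
# dataclasses.field(default_factory=lambda: [8]), which avoids the shared
# mutable default that a plain `default=[8]` would create.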
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )
    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )
    def to_json_string(self):
        return json.dumps(dataclasses.asdict(self), indent=2)
    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']`."
            )
        return self.models
    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
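# Minimal usage sketch (model name is a placeholder):
#   args = BenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
#   print(args.to_json_string())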
| 66 |
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , lowercase=None , lowercase=None , lowercase=None , lowercase="replace" , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=False , lowercase=True , **lowercase , ) -> Any:
super().__init__(
lowercase , lowercase , tokenizer_file=lowercase , errors=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase , **lowercase , )
lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , lowercase ) != add_prefix_space:
lowerCAmelCase = getattr(lowercase , pre_tok_state.pop("""type""" ) )
lowerCAmelCase = add_prefix_space
lowerCAmelCase = pre_tok_class(**lowercase )
lowerCAmelCase = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowerCAmelCase = """post_processor"""
lowerCAmelCase = getattr(self.backend_tokenizer , lowercase , lowercase )
if tokenizer_component_instance:
lowerCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase = tuple(state["""sep"""] )
if "cls" in state:
lowerCAmelCase = tuple(state["""cls"""] )
lowerCAmelCase = False
if state.get("""add_prefix_space""" , lowercase ) != add_prefix_space:
lowerCAmelCase = add_prefix_space
lowerCAmelCase = True
if state.get("""trim_offsets""" , lowercase ) != trim_offsets:
lowerCAmelCase = trim_offsets
lowerCAmelCase = True
if changes_to_apply:
lowerCAmelCase = getattr(lowercase , state.pop("""type""" ) )
lowerCAmelCase = component_class(**lowercase )
setattr(self.backend_tokenizer , lowercase , lowercase )
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
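    # Illustration of the rule above (values made up): with padding_side="right"
    # and two padding positions added, a global attention mask [1, 0, 0] becomes
    # [1, 0, 0, -1, -1]; -1 marks padded positions that use only local attention.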
| 532 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
    @require_cuda
    def test_grad_scaler_kwargs(self):
        # Pass custom arguments to the GradScaler through a kwargs handler.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
_A = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
_A = Accelerator(kwargs_handlers=[ddp_scaler])
_A = torch.nn.Linear(100, 200)
_A = accelerator.prepare(model)
# Check the values changed in kwargs
_A = ""
_A = model.bucket_bytes_cap // (1_024 * 1_024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
        raise ValueError(error_msg)
| 714 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params
    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(A_ , A_ )] | 228 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
a : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
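    # With the lazy module installed above, importing a name such as
    # GraphormerModel defers the heavy torch-dependent import until the
    # attribute is first accessed (a sketch of the intent, not extra code).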
| 69 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
"""simple docstring"""
__snake_case = prediction_length
__snake_case = context_length if context_length is not None else prediction_length
__snake_case = distribution_output
__snake_case = loss
__snake_case = input_size
__snake_case = num_time_features
__snake_case = lags_sequence
__snake_case = scaling
__snake_case = num_dynamic_real_features
__snake_case = num_static_real_features
__snake_case = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(a_ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
__snake_case = cardinality
else:
__snake_case = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(a_ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
__snake_case = embedding_dimension
else:
__snake_case = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__snake_case = num_parallel_samples
# Transformer architecture configuration
__snake_case = input_size * len(self.lags_sequence ) + self._number_of_features
__snake_case = d_model
__snake_case = encoder_attention_heads
__snake_case = decoder_attention_heads
__snake_case = encoder_ffn_dim
__snake_case = decoder_ffn_dim
__snake_case = encoder_layers
__snake_case = decoder_layers
__snake_case = dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = encoder_layerdrop
__snake_case = decoder_layerdrop
__snake_case = activation_function
__snake_case = init_std
__snake_case = use_cache
# Autoformer
__snake_case = label_length
__snake_case = moving_average
__snake_case = autocorrelation_factor
super().__init__(is_encoder_decoder=a_ , **a_ )
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
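    # Worked example (illustrative numbers): with input_size=1, a 7-entry
    # lags_sequence and _number_of_features == 5, feature_size = 1 * 7 + 5 = 12.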
| 69 | 1 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
    @require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        # When the tokenizer has no pad token, reuse EOS so that batching works.
        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")

        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self):
        prompt = """Hello I believe in"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slightly special:
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)
    def test_pipeline_length_setting_warning(self):
        prompt = """Hello world"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
| 712 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaPipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
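    # These tiny dimensions (hidden size 32, cross-attention 100) keep the dummy
    # UNet and VQ model small enough for fast CPU-only unit tests.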
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        model = UNetaDConditionModel(**model_kwargs)
return model
    @property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # Clean up GPU memory after each test.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "red cat, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_embeds, negative_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_embeds,
            negative_image_embeds=negative_image_embeds,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
| 304 | 0 |
"""Unconditional image-generation pipeline using the stochastic sampler of Karras et al. (2022)."""
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class KarrasVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        num_inference_steps=50,
        generator=None,
        output_type="pil",
        return_dict=True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 245 |
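# A minimal usage sketch for the pipeline defined above. Illustrative only, not
# part of the original file: the checkpoint id is an assumption (any
# unconditional UNet2DModel checkpoint would do).
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")  # assumed checkpoint
scheduler = KarrasVeScheduler()
pipe = KarrasVePipeline(unet=unet, scheduler=scheduler).to("cuda")

# 50 stochastic sampling steps; the generator makes the run reproducible.
image = pipe(batch_size=1, num_inference_steps=50, generator=torch.manual_seed(0)).images[0]
image.save("karras_ve_sample.png")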
import unittest

from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing.
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests: this tokenizer has no padding token, so
                # padding to a fixed length must raise.
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_subword_regularization_tokenizer(self):
        # Intentionally a no-op override for this tokenizer.
        pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
| 245 | 1 |
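# A brief usage sketch for the tokenizer exercised above. Illustrative, not part
# of the original test file; it assumes network access to the
# "google/reformer-crime-and-punishment" checkpoint referenced in the slow tests.
from transformers import ReformerTokenizer

tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
ids = tok.encode("Hello World!")  # the slow test expects [126, 32, 262, 152, 38, 72, 287]
print(tok.convert_ids_to_tokens(ids))  # SentencePiece pieces; "▁" marks word starts
print(tok.decode(ids))  # round-trips back to the original text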
"""simple docstring"""
class a :
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowercase = name
lowercase = value
lowercase = weight
def __repr__( self ):
return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def UpperCamelCase_ ( self ):
return self.value
def UpperCamelCase_ ( self ):
return self.name
def UpperCamelCase_ ( self ):
return self.weight
def UpperCamelCase_ ( self ):
return self.value / self.weight
def _SCREAMING_SNAKE_CASE ( __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] ):
'''simple docstring'''
lowercase = []
for i in range(len(__snake_case ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[int] , __snake_case : Any , __snake_case : Any ):
'''simple docstring'''
lowercase = sorted(__snake_case , key=__snake_case , reverse=__snake_case )
lowercase = []
lowercase , lowercase = 0.0, 0.0
for i in range(len(__snake_case ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 134 |
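# A small worked run of the helpers above (menu values invented for illustration).
food = ["Burger", "Pizza", "Coca Cola", "Rice"]
value = [80, 100, 60, 70]
weight = [40, 10, 20, 70]

foods = build_menu(food, value, weight)
# Greedy by value: Pizza (cost 10) then Burger (cost 40) fit in a budget of 60.
chosen, total = greedy(foods, 60, Things.get_value)
print(chosen, total)  # -> [Things(Pizza, 100, 10), Things(Burger, 80, 40)] 180.0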
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=1, block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32,
            attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001,
            beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32,
            attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu",
            class_embed_type="timestep", mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu", time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001,
            beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
| 134 | 1 |
import argparse
from collections import OrderedDict
from pathlib import Path

import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F

from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]

    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        idalabel = {0: "table", 1: "table rotated"}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        idalabel = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 175 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    """
    Output class for Stable Diffusion pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`): the denoised images.
        nsfw_content_detected (`List[bool]`, *optional*): flags whether the safety
            checker flagged the corresponding image.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('>=', '0.0.12')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
    import flax

    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        """
        Output class for Flax-based Stable Diffusion pipelines.
        """

        images: np.ndarray
        nsfw_content_detected: List[bool]

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
    from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 215 | 0 |
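# Downstream-usage sketch for the conditional exports above. Illustrative: the
# checkpoint id is an assumption. If torch/transformers are missing, the guarded
# imports above substitute dummy objects that raise a helpful error instead.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16  # assumed checkpoint
).to("cuda")
image = pipe("a photo of an astronaut riding a horse").images[0]
image.save("astronaut.png")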
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            ) | 596 |
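# A dummy-input sketch for the ONNX config above. Illustrative: it assumes the
# class name BlenderbotSmallOnnxConfig as reconstructed here, and that the 90M
# checkpoint referenced in the archive map is reachable.
from transformers import AutoConfig, AutoTokenizer
from transformers.utils import TensorType

config = AutoConfig.from_pretrained("facebook/blenderbot_small-90M")
tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")

onnx_config = BlenderbotSmallOnnxConfig(config, task="seq2seq-lm")
dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
print(sorted(dummy_inputs.keys()))  # input_ids, attention_mask, decoder_input_ids, ...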
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """
    Converts a string of roman numerals to an integer by summing symbol values
    left to right, subtracting whenever a smaller symbol precedes a larger one
    (e.g. the "I" in "IX").
    """
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    """
    Generates the minimal (canonical) roman-numeral spelling of an integer.
    """
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """
    Project Euler 89: for each numeral in the input file, count the characters
    saved by rewriting it in minimal form.
    """
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)

    return savings


if __name__ == "__main__":
    print(f"{solution() = }") | 596 | 1 |
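# A quick worked example for the two helpers above (easy to verify by hand):
# "XIIIIII" is an additive spelling of 16; its canonical form is "XVI",
# saving 4 characters -- exactly the quantity solution() accumulates per line.
assert parse_roman_numerals("XIIIIII") == 16
assert generate_roman_numerals(16) == "XVI"
assert len("XIIIIII") - len("XVI") == 4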
"""simple docstring"""
import numpy as np
def A_ ( snake_case__ , snake_case__ , snake_case__ = 1E-12 , snake_case__ = 1_00 , ) -> tuple[float, np.ndarray]:
assert np.shape(snake_case__ )[0] == np.shape(snake_case__ )[1]
# Ensure proper dimensionality.
assert np.shape(snake_case__ )[0] == np.shape(snake_case__ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(snake_case__ ) == np.iscomplexobj(snake_case__ )
_UpperCamelCase :Optional[int] = np.iscomplexobj(snake_case__ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(snake_case__ , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
_UpperCamelCase :Optional[Any] = False
_UpperCamelCase :Union[str, Any] = 0
_UpperCamelCase :Union[str, Any] = 0
_UpperCamelCase :Optional[Any] = 1E12
while not convergence:
# Multiple matrix by the vector.
_UpperCamelCase :Dict = np.dot(snake_case__ , snake_case__ )
# Normalize the resulting output vector.
_UpperCamelCase :str = w / np.linalg.norm(snake_case__ )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
_UpperCamelCase :int = vector.conj().T if is_complex else vector.T
_UpperCamelCase :int = np.dot(snake_case__ , np.dot(snake_case__ , snake_case__ ) )
# Check convergence.
_UpperCamelCase :Optional[int] = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
_UpperCamelCase :Union[str, Any] = True
_UpperCamelCase :Optional[Any] = lambda_
if is_complex:
_UpperCamelCase :Any = np.real(lambda_ )
return lambda_, vector
def A_ ( ) -> None:
_UpperCamelCase :Optional[Any] = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
_UpperCamelCase :int = np.array([41, 4, 20] )
_UpperCamelCase :Tuple = real_input_matrix.astype(np.complexaaa )
_UpperCamelCase :Optional[int] = np.triu(1J * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
_UpperCamelCase :List[str] = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
_UpperCamelCase :Tuple = real_input_matrix
_UpperCamelCase :List[str] = real_vector
elif problem_type == "complex":
_UpperCamelCase :Optional[int] = complex_input_matrix
_UpperCamelCase :int = complex_vector
# Our implementation.
_UpperCamelCase , _UpperCamelCase :Union[str, Any] = power_iteration(snake_case__ , snake_case__ )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
_UpperCamelCase , _UpperCamelCase :Union[str, Any] = np.linalg.eigh(snake_case__ )
# Last eigenvalue is the maximum one.
_UpperCamelCase :str = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
_UpperCamelCase :Optional[Any] = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(snake_case__ ) - np.abs(snake_case__ ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 355 |
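# A tiny concrete run of power_iteration. The dominant eigenvalue of the
# diagonal matrix [[2, 0], [0, 1]] is 2, with eigenvector e1, so the result is
# easy to check by hand.
import numpy as np

value, vec = power_iteration(np.array([[2.0, 0.0], [0.0, 1.0]]), np.array([1.0, 1.0]))
print(round(value, 6))  # -> 2.0
print(np.round(np.abs(vec), 3))  # -> [1. 0.]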
"""simple docstring"""
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class A( lowerCamelCase__ ):
"""simple docstring"""
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ ) -> float:
"""simple docstring"""
return 0.0
def A_ ( snake_case__ , snake_case__ ) -> tuple[int | float, int | float]:
_UpperCamelCase :Union[str, Any] = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
_UpperCamelCase :Optional[int] = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def A_ ( snake_case__ , snake_case__ ) -> None:
_UpperCamelCase :List[str] = 5_12
_UpperCamelCase :int = [1] + [0] * (size - 1)
_UpperCamelCase :Union[str, Any] = [filter_type.process(snake_case__ ) for item in inputs]
_UpperCamelCase :Any = [0] * (samplerate - size) # zero-padding
outputs += filler
_UpperCamelCase :int = np.abs(np.fft.fft(snake_case__ ) )
_UpperCamelCase :str = 20 * np.logaa(snake_case__ )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('''Frequency (Hz)''' )
plt.xscale('''log''' )
# Display within reasonable bounds
_UpperCamelCase :Tuple = get_bounds(snake_case__ , snake_case__ )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel('''Gain (dB)''' )
plt.plot(snake_case__ )
plt.show()
def A_ ( snake_case__ , snake_case__ ) -> None:
_UpperCamelCase :Tuple = 5_12
_UpperCamelCase :Union[str, Any] = [1] + [0] * (size - 1)
_UpperCamelCase :List[Any] = [filter_type.process(snake_case__ ) for item in inputs]
_UpperCamelCase :str = [0] * (samplerate - size) # zero-padding
outputs += filler
_UpperCamelCase :Tuple = np.angle(np.fft.fft(snake_case__ ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('''Frequency (Hz)''' )
plt.xscale('''log''' )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel('''Phase shift (Radians)''' )
plt.plot(np.unwrap(snake_case__ , -2 * pi ) )
plt.show()
| 355 | 1 |
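# A minimal concrete FilterType to drive the plots above -- a sketch, not from
# the original source: a one-pole low-pass y[n] = a*x[n] + (1 - a)*y[n-1].
class OnePoleLowpass:
    def __init__(self, a: float = 0.1) -> None:
        self.a = a
        self.prev = 0.0

    def process(self, sample: float) -> float:
        self.prev = self.a * sample + (1 - self.a) * self.prev
        return self.prev


if __name__ == "__main__":
    show_frequency_response(OnePoleLowpass(), samplerate=48000)
    show_phase_response(OnePoleLowpass(), samplerate=48000)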
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowercase__ : Tuple = PriorTransformer
lowercase__ : Optional[Any] = """hidden_states"""
@property
def lowercase_ ( self ):
'''simple docstring'''
A__ = 4
A__ = 8
A__ = 7
A__ = floats_tensor((batch_size, embedding_dim) ).to(UpperCamelCase__ )
A__ = floats_tensor((batch_size, embedding_dim) ).to(UpperCamelCase__ )
A__ = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(UpperCamelCase__ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def lowercase_ ( self , UpperCamelCase__=0 ):
'''simple docstring'''
torch.manual_seed(UpperCamelCase__ )
A__ = 4
A__ = 8
A__ = 7
A__ = torch.randn((batch_size, embedding_dim) ).to(UpperCamelCase__ )
A__ = torch.randn((batch_size, embedding_dim) ).to(UpperCamelCase__ )
A__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(UpperCamelCase__ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def lowercase_ ( self ):
'''simple docstring'''
return (4, 8)
@property
def lowercase_ ( self ):
'''simple docstring'''
return (4, 8)
def lowercase_ ( self ):
'''simple docstring'''
A__ = {
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_layers": 2,
"embedding_dim": 8,
"num_embeddings": 7,
"additional_embeddings": 4,
}
A__ = self.dummy_input
return init_dict, inputs_dict
def lowercase_ ( self ):
'''simple docstring'''
A__ , A__ = PriorTransformer.from_pretrained(
"hf-internal-testing/prior-dummy" , output_loading_info=UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(UpperCamelCase__ )
A__ = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def lowercase_ ( self ):
'''simple docstring'''
A__ , A__ = self.prepare_init_args_and_inputs_for_common()
A__ = self.model_class(**UpperCamelCase__ )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ["hidden_states", "timestep"]
self.assertListEqual(arg_names[:2] , UpperCamelCase__ )
def lowercase_ ( self ):
'''simple docstring'''
A__ = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
A__ = model.to(UpperCamelCase__ )
if hasattr(UpperCamelCase__ , "set_default_attn_processor" ):
model.set_default_attn_processor()
A__ = self.get_dummy_seed_input()
with torch.no_grad():
A__ = model(**UpperCamelCase__ )[0]
A__ = output[0, :5].flatten().cpu()
print(UpperCamelCase__ )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
A__ = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] )
self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-2 ) )
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
def lowercase_ ( self , UpperCamelCase__=1 , UpperCamelCase__=7_68 , UpperCamelCase__=77 , UpperCamelCase__=0 ):
'''simple docstring'''
torch.manual_seed(UpperCamelCase__ )
A__ = batch_size
A__ = embedding_dim
A__ = num_embeddings
A__ = torch.randn((batch_size, embedding_dim) ).to(UpperCamelCase__ )
A__ = torch.randn((batch_size, embedding_dim) ).to(UpperCamelCase__ )
A__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(UpperCamelCase__ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def lowercase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
] )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior" , subfolder="prior" )
model.to(UpperCamelCase__ )
A__ = self.get_dummy_seed_input(seed=UpperCamelCase__ )
with torch.no_grad():
A__ = model(**UpperCamelCase__ )[0]
assert list(sample.shape ) == [1, 7_68]
A__ = sample[0, :8].flatten().cpu()
print(UpperCamelCase__ )
A__ = torch.tensor(UpperCamelCase__ )
assert torch_all_close(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 )
| 261 |
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
__UpperCAmelCase ="""base_with_context"""
def __a ( A , A ) -> str:
'''simple docstring'''
A__ = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
A__ = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=A )
for lyr_num, lyr in enumerate(model.encoders ):
A__ = weights[f"""layers_{lyr_num}"""]
A__ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
A__ = ly_weight["attention"]
A__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def __a ( A , A ) -> Dict:
'''simple docstring'''
A__ = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
A__ = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=A )
for lyr_num, lyr in enumerate(model.encoders ):
A__ = weights[f"""layers_{lyr_num}"""]
A__ = ly_weight["attention"]
A__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
A__ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
A__ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def __a ( A , A ) -> Union[str, Any]:
'''simple docstring'''
A__ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
A__ = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=A )
A__ = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
A__ = weights[f"""layers_{lyr_num}"""]
A__ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
A__ = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
A__ = ly_weight["self_attention"]
A__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
A__ = ly_weight["MultiHeadDotProductAttention_0"]
A__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
A__ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
A__ = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
A__ = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def __a ( A ) -> str:
'''simple docstring'''
A__ = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
A__ = jnp.tree_util.tree_map(onp.array , A )
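# tree_map applies onp.array to every leaf of the nested t5x checkpoint dict, converting the
# jax arrays to plain numpy so they can be copied into torch parameters below.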
A__ = [
"from __gin__ import dynamic_registration",
"from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
"diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
"diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
]
A__ = os.path.join(args.checkpoint_path , ".." , "config.gin" )
A__ = inference.parse_training_gin_file(A , A )
A__ = inference.InferenceModel(args.checkpoint_path , A )
A__ = DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" )
A__ = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
A__ = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
A__ = T5FilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
A__ = load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , A )
A__ = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , A )
A__ = load_decoder(ta_checkpoint["target"]["decoder"] , A )
A__ = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )
A__ = SpectrogramDiffusionPipeline(
notes_encoder=A , continuous_encoder=A , decoder=A , scheduler=A , melgan=A , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
__UpperCAmelCase =argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
__UpperCAmelCase =parser.parse_args()
main(args)
| 261 | 1 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase__ ( A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''image_processor''', '''tokenizer''']
UpperCAmelCase_ = '''BridgeTowerImageProcessor'''
UpperCAmelCase_ = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Dict ):
"""simple docstring"""
super().__init__(UpperCamelCase , UpperCamelCase )
def __call__( self : str , UpperCamelCase : List[str] , UpperCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[bool, str, PaddingStrategy] = False , UpperCamelCase : Union[bool, str, TruncationStrategy] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : int = 0 , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[bool] = None , UpperCamelCase : Optional[bool] = None , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[str, TensorType]] = None , **UpperCamelCase : Tuple , ):
"""simple docstring"""
_lowercase : int = self.tokenizer(
text=UpperCamelCase , add_special_tokens=UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=UpperCamelCase , stride=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_token_type_ids=UpperCamelCase , return_attention_mask=UpperCamelCase , return_overflowing_tokens=UpperCamelCase , return_special_tokens_mask=UpperCamelCase , return_offsets_mapping=UpperCamelCase , return_length=UpperCamelCase , verbose=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase , )
# add pixel_values + pixel_mask
_lowercase : Union[str, Any] = self.image_processor(
UpperCamelCase , return_tensors=UpperCamelCase , do_normalize=UpperCamelCase , do_center_crop=UpperCamelCase , **UpperCamelCase )
encoding.update(UpperCamelCase )
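# the merged BatchEncoding now carries the text fields (input_ids, attention_mask, ...)
# alongside the image fields (pixel_values and, when padding is enabled, pixel_mask)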
return encoding
def lowerCAmelCase_ ( self : Any , *UpperCamelCase : int , **UpperCamelCase : int ):
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def lowerCAmelCase_ ( self : Optional[int] , *UpperCamelCase : Optional[int] , **UpperCamelCase : str ):
"""simple docstring"""
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
_lowercase : Optional[Any] = self.tokenizer.model_input_names
_lowercase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 322 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser(
description=(
'Extract some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer-Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
UpperCamelCase__ = parser.parse_args()
if args.model_type == "bert":
UpperCamelCase__ = BertForMaskedLM.from_pretrained(args.model_name)
UpperCamelCase__ = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
UpperCamelCase__ = model.state_dict()
UpperCamelCase__ = {}
for w in ["word_embeddings", "position_embeddings"]:
UpperCamelCase__ = state_dict[F"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
UpperCamelCase__ = state_dict[F"""{prefix}.embeddings.LayerNorm.{w}"""]
UpperCamelCase__ = 0
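# Teacher layers 0, 2, 4, 7, 9 and 11 of the 12-layer BERT are copied into the six
# student layers below, one teacher layer per student layer.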
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
UpperCamelCase__ = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
UpperCamelCase__ = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
UpperCamelCase__ = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
UpperCamelCase__ = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
UpperCamelCase__ = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
UpperCamelCase__ = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
UpperCamelCase__ = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
UpperCamelCase__ = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
UpperCamelCase__ = state_dict['cls.predictions.decoder.weight']
UpperCamelCase__ = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCamelCase__ = state_dict[F"""cls.predictions.transform.dense.{w}"""]
UpperCamelCase__ = state_dict[F"""cls.predictions.transform.LayerNorm.{w}"""]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint) | 322 | 1 |
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def a ( __a , __a=1000 ) -> Optional[int]:
'''simple docstring'''
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
UpperCamelCase__ :Optional[Any] = n - 1
UpperCamelCase__ :Tuple = 0
while d % 2 == 0:
d /= 2
exp += 1
# n - 1 = d * (2**exp)
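# e.g. for n = 561: n - 1 = 560 = 35 * 2**4, so d = 35 and exp = 4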
UpperCamelCase__ :int = 0
while count < prec:
UpperCamelCase__ :Optional[int] = random.randint(2 , n - 1 )
UpperCamelCase__ :Optional[Any] = bin_exp_mod(A_ , A_ , A_ )
if b != 1:
UpperCamelCase__ :int = True
for _ in range(A_ ):
if b == n - 1:
UpperCamelCase__ :Dict = False
break
UpperCamelCase__ :Tuple = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
__snake_case = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 710 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=32 , UpperCamelCase_=2 , UpperCamelCase_=3 , UpperCamelCase_=16 , UpperCamelCase_=[32, 64, 128] , UpperCamelCase_=[1, 2, 1] , UpperCamelCase_=[2, 2, 4] , UpperCamelCase_=2 , UpperCamelCase_=2.0 , UpperCamelCase_=True , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.1 , UpperCamelCase_="gelu" , UpperCamelCase_=False , UpperCamelCase_=True , UpperCamelCase_=0.02 , UpperCamelCase_=1e-5 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=10 , UpperCamelCase_=8 , UpperCamelCase_=["stage1", "stage2"] , UpperCamelCase_=[1, 2] , ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = parent
UpperCamelCase__ :Dict = batch_size
UpperCamelCase__ :Tuple = image_size
UpperCamelCase__ :Any = patch_size
UpperCamelCase__ :Tuple = num_channels
UpperCamelCase__ :int = embed_dim
UpperCamelCase__ :Optional[int] = hidden_sizes
UpperCamelCase__ :List[str] = depths
UpperCamelCase__ :List[str] = num_heads
UpperCamelCase__ :Dict = window_size
UpperCamelCase__ :str = mlp_ratio
UpperCamelCase__ :List[str] = qkv_bias
UpperCamelCase__ :Optional[Any] = hidden_dropout_prob
UpperCamelCase__ :List[Any] = attention_probs_dropout_prob
UpperCamelCase__ :List[str] = drop_path_rate
UpperCamelCase__ :str = hidden_act
UpperCamelCase__ :Optional[Any] = use_absolute_embeddings
UpperCamelCase__ :str = patch_norm
UpperCamelCase__ :List[Any] = layer_norm_eps
UpperCamelCase__ :Dict = initializer_range
UpperCamelCase__ :int = is_training
UpperCamelCase__ :List[Any] = scope
UpperCamelCase__ :Dict = use_labels
UpperCamelCase__ :Dict = type_sequence_label_size
UpperCamelCase__ :Tuple = encoder_stride
UpperCamelCase__ :Optional[int] = out_features
UpperCamelCase__ :Optional[int] = out_indices
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ :Any = None
if self.use_labels:
UpperCamelCase__ :int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :Tuple = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :str = FocalNetModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :str = model(UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCamelCase__ :Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = FocalNetBackbone(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :Any = model(UpperCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
UpperCamelCase__ :Tuple = None
UpperCamelCase__ :Optional[Any] = FocalNetBackbone(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :List[str] = model(UpperCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = FocalNetForMaskedImageModeling(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :List[Any] = model(UpperCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCamelCase__ :Optional[int] = 1
UpperCamelCase__ :Optional[Any] = FocalNetForMaskedImageModeling(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ :Optional[Any] = model(UpperCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.type_sequence_label_size
UpperCamelCase__ :Optional[Any] = FocalNetForImageClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :int = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase__ :Tuple = 1
UpperCamelCase__ :Tuple = FocalNetForImageClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ :Dict = model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[int] = config_and_inputs
UpperCamelCase__ :Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
_a = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
_a = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
_a = False
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = FocalNetModelTester(self )
UpperCamelCase__ :Optional[int] = ConfigTester(self , config_class=UpperCamelCase_ , embed_dim=37 , has_text_modality=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
@unittest.skip(reason='''FocalNet does not use inputs_embeds''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''FocalNet does not use feedforward chunking''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCamelCase__ :Optional[Any] = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase__ :Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCamelCase__ :List[str] = model_class(UpperCamelCase_ )
UpperCamelCase__ :Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ :Union[str, Any] = [*signature.parameters.keys()]
UpperCamelCase__ :Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
with torch.no_grad():
UpperCamelCase__ :str = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
UpperCamelCase__ :List[Any] = outputs.hidden_states
UpperCamelCase__ :Optional[int] = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
# FocalNet has a different seq_length
UpperCamelCase__ :List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase__ :List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
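# with the tester defaults above (image_size=32, patch_size=2) this is (32 // 2) * (32 // 2) = 256 patches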
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
UpperCamelCase__ :List[Any] = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Dict = reshaped_hidden_states[0].shape
UpperCamelCase__ :List[str] = (
reshaped_hidden_states[0].view(UpperCamelCase_ , UpperCamelCase_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
UpperCamelCase__ :Union[str, Any] = True
self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ :List[Any] = True
self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :Optional[int] = 3
UpperCamelCase__ :str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCamelCase__ :Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase__ :List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCamelCase__ :Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
UpperCamelCase__ :List[str] = True
self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ :List[str] = True
self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , (padded_height, padded_width) )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Union[str, Any] = FocalNetModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :Dict = _config_zero_init(UpperCamelCase_ )
for model_class in self.all_model_classes:
UpperCamelCase__ :int = model_class(config=UpperCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''' ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''' ).to(UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.default_image_processor
UpperCamelCase__ :str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
UpperCamelCase__ :Any = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
UpperCamelCase__ :Tuple = model(**UpperCamelCase_ )
# verify the logits
UpperCamelCase__ :Any = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
UpperCamelCase__ :List[str] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1e-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = (FocalNetBackbone,) if is_torch_available() else ()
_a = FocalNetConfig
_a = False
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = FocalNetModelTester(self )
| 280 | 0 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
__lowercase : Dict ={
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
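# Illustrative effect of the remapping above: a checkpoint key such as
# "image_encoder.blocks.0.norm1.weight" becomes "vision_encoder.layers.0.layer_norm1.weight"
# once the "image_encoder", "blocks" and ".norm" substitutions are applied in turn.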
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ ={}
state_dict.pop("pixel_mean" , lowercase__ )
state_dict.pop("pixel_std" , lowercase__ )
UpperCAmelCase_ =R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
UpperCAmelCase_ =key.replace(lowercase__ , lowercase__ )
if re.match(lowercase__ , lowercase__ ):
UpperCAmelCase_ =int(re.match(lowercase__ , lowercase__ ).group(2 ) )
if layer_nb == 0:
UpperCAmelCase_ =key.replace("layers.0" , "proj_in" )
elif layer_nb == 1:
UpperCAmelCase_ =key.replace("layers.1" , "layers.0" )
elif layer_nb == 2:
UpperCAmelCase_ =key.replace("layers.2" , "proj_out" )
UpperCAmelCase_ =value
UpperCAmelCase_ =model_state_dict[
"prompt_encoder.shared_embedding.positional_embedding"
]
return model_state_dict
def a__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__="ybelkada/segment-anything" ):
'''simple docstring'''
UpperCAmelCase_ =hf_hub_download(lowercase__ , F'checkpoints/{model_name}.pth' )
if "sam_vit_b" in model_name:
UpperCAmelCase_ =SamConfig()
elif "sam_vit_l" in model_name:
UpperCAmelCase_ =SamVisionConfig(
hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , global_attn_indexes=[5, 1_1, 1_7, 2_3] , )
UpperCAmelCase_ =SamConfig(
vision_config=lowercase__ , )
elif "sam_vit_h" in model_name:
UpperCAmelCase_ =SamVisionConfig(
hidden_size=1_2_8_0 , num_hidden_layers=3_2 , num_attention_heads=1_6 , global_attn_indexes=[7, 1_5, 2_3, 3_1] , )
UpperCAmelCase_ =SamConfig(
vision_config=lowercase__ , )
UpperCAmelCase_ =torch.load(lowercase__ , map_location="cpu" )
UpperCAmelCase_ =replace_keys(lowercase__ )
UpperCAmelCase_ =SamImageProcessor()
UpperCAmelCase_ =SamProcessor(image_processor=lowercase__ )
UpperCAmelCase_ =SamModel(lowercase__ )
hf_model.load_state_dict(lowercase__ )
UpperCAmelCase_ =hf_model.to("cuda" )
UpperCAmelCase_ ="https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
UpperCAmelCase_ =Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert("RGB" )
UpperCAmelCase_ =[[[4_0_0, 6_5_0]]]
UpperCAmelCase_ =[[1]]
UpperCAmelCase_ =processor(images=np.array(lowercase__ ) , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
UpperCAmelCase_ =hf_model(**lowercase__ )
UpperCAmelCase_ =output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_8902_5115_9668
UpperCAmelCase_ =processor(
images=np.array(lowercase__ ) , input_points=lowercase__ , input_labels=lowercase__ , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
UpperCAmelCase_ =hf_model(**lowercase__ )
UpperCAmelCase_ =output.iou_scores.squeeze()
assert scores[-1].item() == 0.9712_6030_9219_3604
UpperCAmelCase_ =((7_5, 2_7_5, 1_7_2_5, 8_5_0),)
UpperCAmelCase_ =processor(images=np.array(lowercase__ ) , input_boxes=lowercase__ , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
UpperCAmelCase_ =hf_model(**lowercase__ )
UpperCAmelCase_ =output.iou_scores.squeeze()
assert scores[-1].item() == 0.8686_0156_0592_6514
# Test with 2 points and 1 image.
UpperCAmelCase_ =[[[4_0_0, 6_5_0], [8_0_0, 6_5_0]]]
UpperCAmelCase_ =[[1, 1]]
UpperCAmelCase_ =processor(
images=np.array(lowercase__ ) , input_points=lowercase__ , input_labels=lowercase__ , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
UpperCAmelCase_ =hf_model(**lowercase__ )
UpperCAmelCase_ =output.iou_scores.squeeze()
assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
__lowercase : Optional[Any] =argparse.ArgumentParser()
__lowercase : List[Any] =["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""]
parser.add_argument(
"""--model_name""",
default="""sam_vit_h_4b8939""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
parser.add_argument(
"""--model_hub_id""",
default="""ybelkada/segment-anything""",
type=str,
help="""Hub repo id that hosts the original SAM checkpoints.""",
)
__lowercase : List[Any] =parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 54 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
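# A minimal sketch of the decorator this example is built around (toy function, not
# part of this script): `find_executable_batch_size` re-runs the decorated function,
# halving `batch_size` after every CUDA out-of-memory error, until the body finishes
# or the batch size is exhausted.
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # build the dataloaders/model with `batch_size` and train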
A__ : List[Any] = 1_6
A__ : Union[str, Any] = 3_2
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase = 16 ):
"""simple docstring"""
_lowercase: Any = AutoTokenizer.from_pretrained('''bert-base-cased''' )
_lowercase: List[str] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(_UpperCamelCase ):
# max_length=None => use the model max length (it's actually the default)
_lowercase: Union[str, Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_UpperCamelCase , max_length=_UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_lowercase: Tuple = datasets.map(
_UpperCamelCase , batched=_UpperCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_lowercase: Optional[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(_UpperCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
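# (XLA recompiles the computation graph for every new input shape, so static shapes
# avoid repeated recompilation.)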
_lowercase: Union[str, Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_lowercase: int = 16
elif accelerator.mixed_precision != "no":
_lowercase: Tuple = 8
else:
_lowercase: str = None
return tokenizer.pad(
_UpperCamelCase , padding='''longest''' , max_length=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
_lowercase: List[Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=_UpperCamelCase , collate_fn=_UpperCamelCase , batch_size=_UpperCamelCase )
_lowercase: int = DataLoader(
tokenized_datasets['''validation'''] , shuffle=_UpperCamelCase , collate_fn=_UpperCamelCase , batch_size=_UpperCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A__ : List[Any] = mocked_dataloaders # noqa: F811
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , _UpperCamelCase ) == "1":
_lowercase: Tuple = 2
# Initialize accelerator
_lowercase: Optional[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowercase: Optional[Any] = config['''lr''']
_lowercase: Tuple = int(config['''num_epochs'''] )
_lowercase: Any = int(config['''seed'''] )
_lowercase: List[Any] = int(config['''batch_size'''] )
_lowercase: Optional[int] = evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We can now define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=_UpperCamelCase )
def inner_training_loop(_UpperCamelCase ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(_UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowercase: Optional[int] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=_UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to come before the optimizer
# creation; otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_lowercase: List[Any] = model.to(accelerator.device )
# Instantiate optimizer
_lowercase: List[str] = AdamW(params=model.parameters() , lr=_UpperCamelCase )
_lowercase , _lowercase: Tuple = get_dataloaders(_UpperCamelCase , _UpperCamelCase )
# Instantiate scheduler
_lowercase: str = get_linear_schedule_with_warmup(
optimizer=_UpperCamelCase , num_warmup_steps=100 , num_training_steps=(len(_UpperCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase: Union[str, Any] = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Now we train the model
for epoch in range(_UpperCamelCase ):
model.train()
for step, batch in enumerate(_UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_lowercase: Tuple = model(**_UpperCamelCase )
_lowercase: Union[str, Any] = outputs.loss
accelerator.backward(_UpperCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowercase: str = model(**_UpperCamelCase )
_lowercase: int = outputs.logits.argmax(dim=-1 )
_lowercase , _lowercase: List[str] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=_UpperCamelCase , references=_UpperCamelCase , )
_lowercase: int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , _UpperCamelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def _lowerCAmelCase ( ):
"""simple docstring"""
_lowercase: Dict = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=_UpperCamelCase , default=_UpperCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
_lowercase: Tuple = parser.parse_args()
_lowercase: str = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(_UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
main()
| 353 | 0 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__lowerCamelCase : int = '''<<<<<<< This should probably be modified because it mentions: '''
__lowerCamelCase : List[Any] = '''=======
>>>>>>>
'''
__lowerCamelCase : str = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
__lowerCamelCase : Optional[Any] = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
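# Illustrative effect of the substitutions above: `tfds.features.Text()` becomes
# `datasets.Value('string')` and `tf.int64` becomes `datasets.Value('int64')`.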
def lowercase__ ( __A: Namespace ):
'''simple docstring'''
return ConvertCommand(args.tfds_path ,args.datasets_directory )
class lowerCamelCase ( _lowerCamelCase ):
'''simple docstring'''
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase_ : ArgumentParser ) -> Tuple:
__magic_name__ : Dict = parser.add_parser(
'''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
train_parser.add_argument(
'''--tfds_path''' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
train_parser.add_argument(
'''--datasets_directory''' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='''Path to the HuggingFace Datasets folder.''' )
train_parser.set_defaults(func=lowerCamelCase_ )
def __init__( self : Union[str, Any] , lowerCamelCase_ : str , lowerCamelCase_ : str , *lowerCamelCase_ : str ) -> Any:
__magic_name__ : Optional[Any] = get_logger('''datasets-cli/converting''' )
__magic_name__ : List[Any] = tfds_path
__magic_name__ : Any = datasets_directory
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
if os.path.isdir(self._tfds_path ):
__magic_name__ : Dict = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__magic_name__ : Optional[Any] = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
__magic_name__ : str = os.path.abspath(self._datasets_directory )
self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
__magic_name__ : Dict = []
__magic_name__ : Tuple = []
__magic_name__ : Any = {}
if os.path.isdir(self._tfds_path ):
__magic_name__ : List[Any] = os.listdir(lowerCamelCase_ )
else:
__magic_name__ : Optional[int] = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F'''Looking at file {f_name}''' )
__magic_name__ : int = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
__magic_name__ : List[Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
if not os.path.isfile(lowerCamelCase_ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(lowerCamelCase_ , encoding='''utf-8''' ) as f:
__magic_name__ : Optional[Any] = f.readlines()
__magic_name__ : str = []
__magic_name__ : int = False
__magic_name__ : Optional[int] = False
__magic_name__ : str = []
for line in lines:
__magic_name__ : Optional[int] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__magic_name__ : Optional[Any] = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
__magic_name__ : Tuple = ''''''
continue
elif "from absl import logging" in out_line:
__magic_name__ : Dict = '''from datasets import logging\n'''
elif "getLogger" in out_line:
__magic_name__ : Any = out_line.replace('''getLogger''' , '''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__magic_name__ : int = True
__magic_name__ : Union[str, Any] = list(filter(lambda lowerCamelCase_ : e in out_line , lowerCamelCase_ ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCamelCase_ ) + '''\n''' )
out_lines.append(lowerCamelCase_ )
out_lines.append(lowerCamelCase_ )
continue
else:
for pattern, replacement in TO_CONVERT:
__magic_name__ : Tuple = re.sub(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__magic_name__ : Tuple = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , lowerCamelCase_ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
__magic_name__ : Tuple = '''from . import ''' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__magic_name__ : Dict = True
out_lines.append(lowerCamelCase_ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__magic_name__ : Union[str, Any] = f_name.replace('''.py''' , '''''' )
__magic_name__ : Union[str, Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
__magic_name__ : List[str] = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
self._logger.info(F'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(lowerCamelCase_ )
if needs_manual_update:
with_manual_update.append(lowerCamelCase_ )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.writelines(lowerCamelCase_ )
self._logger.info(F'''Converted in {output_file}''' )
for utils_file in utils_files:
try:
__magic_name__ : Optional[Any] = os.path.basename(lowerCamelCase_ )
__magic_name__ : List[str] = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
self._logger.info(F'''Moving {dest_folder} to {utils_file}''' )
shutil.copy(lowerCamelCase_ , lowerCamelCase_ )
except KeyError:
self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
| 501 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class lowerCamelCase ( _lowerCamelCase ):
'''simple docstring'''
UpperCamelCase__ =None
UpperCamelCase__ =None
UpperCamelCase__ =None
UpperCamelCase__ =None
class lowerCamelCase ( _lowerCamelCase ):
'''simple docstring'''
def __init__( self : List[Any] , lowerCamelCase_ : Optional[Any]=1 , lowerCamelCase_ : Any=0 , lowerCamelCase_ : Dict=2 , lowerCamelCase_ : Dict=512 , lowerCamelCase_ : int="cls" , lowerCamelCase_ : Dict=False , lowerCamelCase_ : List[Any]=True , **lowerCamelCase_ : Optional[Any] , ) -> Optional[int]:
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
__magic_name__ : Dict = project_dim
__magic_name__ : int = pooler_fn
__magic_name__ : str = learn_encoder
__magic_name__ : Optional[int] = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.base_model = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        # `has_pre_transformation` is an optional config flag; default to False when absent.
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> TransformationModelOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            # Project the second-to-last hidden state, after a pre-projection LayerNorm.
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
        else:
            projection_state = self.transformation(outputs.last_hidden_state)

        return TransformationModelOutput(
            projection_state=projection_state,
            last_hidden_state=outputs.last_hidden_state,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
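# Minimal usage sketch. The tokenizer checkpoint name below is an illustrative
# assumption, not confirmed by this file; any XLM-R tokenizer works the same way.
if __name__ == "__main__":
    from transformers import XLMRobertaTokenizer

    tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    config = RobertaSeriesConfig(vocab_size=len(tokenizer), project_dim=512)
    model = RobertaSeriesModelWithTransformation(config)  # randomly initialized, for shape checking only
    batch = tokenizer(["a photo of a cat"], return_tensors="pt")
    output = model(**batch)
    print(output.projection_state.shape)  # (batch_size, sequence_length, project_dim)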
| 501 | 1 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """Runs `check_program` in a separate process and reports whether it passed within `timeout`."""
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield
@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass
class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it is read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False
class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """
    Disables destructive functions so generated code cannot interfere with the
    test (attribute targets restored to match the OpenAI release linked above).
    WARNING: this is not a sandbox; untrusted code should still be run in a
    sandboxed environment.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
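# Illustrative direct call (a sketch; real harnesses such as the `code_eval`
# metric drive `check_correctness` for you). Note: `time_limit` relies on
# SIGALRM, so this demo only runs on Unix-like systems.
if __name__ == "__main__":
    demo_program = "def add(a, b):\n    return a + b\n\nassert add(2, 2) == 4\n"
    print(check_correctness(demo_program, timeout=3.0, task_id="demo/0", completion_id=0))
    # expected: {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': 0}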
| 85 |
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3):
    """Build and simulate a quantum Fourier transform circuit on `number_of_qubits` qubits."""
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
| 85 | 1 |
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Splits `x` into sentences joined by newlines (Pegasus-style targets)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 720 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 240 | 0 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('''.''')
def get_module_path(test_file):
    """Return the dotted module path of a model test file under `tests/models/`."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path


def get_test_module(test_file):
    """Import and return the module object of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Collect all classes in the test module whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Collect all test classes that define a non-empty `all_model_classes`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Collect all model classes appearing in any `all_model_classes` in the test file."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Return the model tester class used by a test class (or None)."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Collect all test classes in `test_file` that cover `model_class`."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Collect all model tester classes in `test_file` associated with `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class in `test_file` to its model tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map each model class in `test_file` to the test classes covering it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class in `test_file` to its model tester classes."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Recursively convert classes/containers to a JSON-friendly structure of names."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
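# Quick sanity sketch (assumes execution from the root of the transformers repo,
# where `tests/models/bert/test_modeling_bert.py` exists).
if __name__ == "__main__":
    bert_test = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
    print(to_json(get_model_to_test_mapping(bert_test)))
    print(to_json(get_model_to_tester_mapping(bert_test)))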
| 340 |
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """CLIP tokenizer that maps one placeholder token to several learned embedding tokens."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
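# Minimal usage sketch. The CLIP checkpoint name is an illustrative assumption;
# any CLIPTokenizer checkpoint behaves the same way.
if __name__ == "__main__":
    tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
    # "<cat-toy>" expands to "<cat-toy>_0 <cat-toy>_1 <cat-toy>_2 <cat-toy>_3"
    print(tokenizer.replace_placeholder_tokens_in_text("a photo of <cat-toy>"))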
| 340 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
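# Minimal usage sketch: build a default config and inspect the mapped attributes.
if __name__ == "__main__":
    config = TableTransformerConfig()
    print(config.model_type)  # "table-transformer"
    print(config.hidden_size)  # alias of d_model -> 256
    print(config.num_attention_heads)  # alias of encoder_attention_heads -> 8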
| 711 |
from __future__ import annotations

from collections.abc import Iterator
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class CircularLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    """
    >>> test_circular_linked_list()
    """
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 379 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50_267,
        entity_vocab_size=500_000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Constructs LukeConfig."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 142 |
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
| 463 | 0 |
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Check the divisibility properties required by Project Euler problem 43."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-to-9 pandigital numbers with the substring divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
| 707 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
| 257 | 0 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 376 |
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed=None) -> str:
    """Simulate the BB84 protocol and return a `key_len`-bit key."""
    # Set up the random number generator.
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")

    from doctest import testmod

    testmod()
| 376 | 1 |
import argparse
import copy


def generate_neighbours(path):
    """Build, for every node, the list of [neighbour, distance] pairs from the input file."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour tour used as the starting solution."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []

    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    """
    Return all neighbours of `solution` obtained by swapping two interior nodes,
    each with its total distance appended as the last element, sorted by distance.
    """
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0

            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Iteratively improves `first_solution`, keeping a tabu list of recent swaps."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 717 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
| 119 | 0 |
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
| 352 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048


class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs


def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
| 352 | 1 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph,
    v,
    visited_forward,
    visited_backward,
    cst_fwd,
    cst_bwd,
    queue,
    parent,
    shortest_distance,
):
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source, destination, graph_forward, graph_backward):
    """
    Bi-directional Dijkstra's algorithm.
    Returns the distance between source and destination, or -1 if unreachable.

    >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
    3
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
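    # Extra illustrative call with the sample graphs above: E -> G -> F costs 3.
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))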
| 704 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=False, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = field(
default=1E-3, metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def collate_fn(examples ):
    pixel_values = torch.stack([example['pixel_values'] for example in examples] )
    return {"pixel_values": pixel_values}
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_mae' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = ds['train'].train_test_split(data_args.train_val_split )
        ds['train'] = split['train']
        ds['validation'] = split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = ViTMAEConfig()
        logger.warning('You are instantiating a new config instance from scratch.' )
        if model_args.config_overrides is not None:
            logger.info(F"""Overriding config: {model_args.config_overrides}""" )
            config.update_from_string(model_args.config_overrides )
            logger.info(F"""New config: {config}""" )
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        image_processor = ViTImageProcessor()
# create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('Training new model from scratch' )
        model = ViTMAEForPreTraining(config )
    if training_args.do_train:
        column_names = ds['train'].column_names
    else:
        column_names = ds['validation'].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = 'image'
    elif "img" in column_names:
        image_column_name = 'img'
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
lowerCamelCase__ : List[Any] = image_processor.size['shortest_edge']
else:
lowerCamelCase__ : Optional[int] = (image_processor.size['height'], image_processor.size['width'])
lowerCamelCase__ : Optional[Any] = Compose(
[
Lambda(lambda _UpperCAmelCase : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(_UpperCAmelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(_UpperCAmelCase ):
lowerCamelCase__ : Tuple = [transforms(_UpperCAmelCase ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
        if data_args.max_train_samples is not None:
            ds['train'] = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(preprocess_images )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
        if data_args.max_eval_samples is not None:
            ds['validation'] = (
                ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images )
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
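    # Worked example of the scaling rule above (illustrative): with base_learning_rate
    # 1e-3, per-device batch size 64, gradient accumulation 1 and 8 processes,
    # total_train_batch_size = 64 * 1 * 8 = 512, so learning_rate = 1e-3 * 512 / 256 = 2e-3.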
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics('train' , train_result.metrics )
        trainer.save_metrics('train' , train_result.metrics )
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        'tasks': 'masked-auto-encoding',
        'dataset': data_args.dataset_name,
        'tags': ['masked-auto-encoding'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
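# Example invocation (illustrative; paths and hyper-parameter values are placeholders,
# but every flag below is defined by the argument classes above or by TrainingArguments):
#
#   python run_mae.py \
#       --dataset_name cifar10 \
#       --output_dir ./vit-mae-demo \
#       --do_train --do_eval \
#       --base_learning_rate 1.5e-4 \
#       --mask_ratio 0.75 \
#       --norm_pix_loss \
#       --overwrite_output_dir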
| 188 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class WhisperConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = '''whisper'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self , vocab_size=51865 , num_mel_bins=80 , encoder_layers=6 , encoder_attention_heads=4 , decoder_layers=6 , decoder_attention_heads=4 , decoder_ffn_dim=1536 , encoder_ffn_dim=1536 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , decoder_start_token_id=50257 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=256 , dropout=0.0 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , scale_embedding=False , max_source_positions=1500 , max_target_positions=448 , pad_token_id=50256 , bos_token_id=50256 , eos_token_id=50256 , suppress_tokens=None , begin_suppress_tokens=[220, 50256] , use_weighted_layer_sum=False , classifier_proj_size=256 , apply_spec_augment=False , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , median_filter_width=7 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , suppress_tokens=suppress_tokens , begin_suppress_tokens=begin_suppress_tokens , **kwargs , )
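    # Illustrative usage (not part of the original file): the defaults above describe a
    # small Whisper-style model; any field can be overridden at construction time.
    #
    #   config = WhisperConfig(encoder_layers=12, decoder_layers=12)
    #   assert config.num_hidden_layers == 12   # mirrors encoder_layers, per __init__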
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast ):
    '''simple docstring'''
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = OrderedDict(
            [
                ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
            ] )
        if self.use_past:
            common_inputs['decoder_input_ids'] = {0: 'batch'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )
        return common_inputs
    def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , sampling_rate = 22050 , time_duration = 5.0 , frequency = 220 , ) -> Mapping[str, Any]:
        '''simple docstring'''
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self , preprocessor=preprocessor.feature_extractor , batch_size=batch_size , framework=framework , sampling_rate=sampling_rate , time_duration=time_duration , frequency=frequency , )
        encoder_sequence_length = encoder_inputs['input_features'].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer , batch_size , seq_length , is_pair , framework )
        dummy_inputs['input_features'] = encoder_inputs.pop('input_features' )
        dummy_inputs['decoder_input_ids'] = decoder_inputs.pop('decoder_input_ids' )
        if "past_key_values" in decoder_inputs:
            dummy_inputs['past_key_values'] = decoder_inputs.pop('past_key_values' )
        return dummy_inputs
    @property
    def atol_for_validation( self ) -> float:
        '''simple docstring'''
        return 1e-3
| 318 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput ):
    '''simple docstring'''
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module ):
    '''simple docstring'''
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup( self ) -> None:
        '''simple docstring'''
        self.conv_in = nn.Conv(
            self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        blocks = []
        for i in range(len(self.block_out_channels ) - 1 ):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv1 )
            conv2 = nn.Conv(
                channel_out , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv2 )
        self.blocks = blocks
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self , conditioning ) -> jnp.ndarray:
        '''simple docstring'''
        embedding = self.conv_in(conditioning )
        embedding = nn.silu(embedding )
        for block in self.blocks:
            embedding = block(embedding )
            embedding = nn.silu(embedding )
        embedding = self.conv_out(embedding )
        return embedding
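# Illustrative note (assumption, not in the original file): each loop iteration above
# adds one stride-2 conv, so with block_out_channels=(16, 32, 96, 256) the input is
# downsampled 2**3 = 8x; a (1, 512, 512, 3) NHWC conditioning image is embedded to
# (1, 64, 64, conditioning_embedding_channels), matching the UNet's first feature map.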
@flax_register_to_config
class FlaxControlNetModel(nn.Module , FlaxModelMixin , ConfigMixin ):
    '''simple docstring'''
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights( self , rng ) -> FrozenDict:
        '''simple docstring'''
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape , dtype=jnp.float32 )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {'params': params_rng, 'dropout': dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states , controlnet_cond )["params"]
    def setup( self ) -> None:
        '''simple docstring'''
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim , dtype=self.dtype )
        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention , bool ):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(num_attention_heads , int ):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )
        # down
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
        controlnet_down_blocks.append(controlnet_block )
        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(down_block )
            for _ in range(self.layers_per_block ):
                controlnet_block = nn.Conv(
                    output_channel , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(controlnet_block )
            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(controlnet_block )
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks
        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self , sample , timesteps , encoder_hidden_states , controlnet_cond , conditioning_scale = 1.0 , return_dict = True , train = False , ) -> Union[FlaxControlNetOutput, Tuple]:
        '''simple docstring'''
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond , axis=1 )
        # 1. time
        if not isinstance(timesteps , jnp.ndarray ):
            timesteps = jnp.array([timesteps] , dtype=jnp.int32 )
        elif isinstance(timesteps , jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32 )
            timesteps = jnp.expand_dims(timesteps , 0 )
        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )
        # 2. pre-process
        sample = jnp.transpose(sample , (0, 2, 3, 1) )
        sample = self.conv_in(sample )
        controlnet_cond = jnp.transpose(controlnet_cond , (0, 2, 3, 1) )
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond )
        sample += controlnet_cond
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlock2D ):
                sample , res_samples = down_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
            else:
                sample , res_samples = down_block(sample , t_emb , deterministic=not train )
            down_block_res_samples += res_samples
        # 4. mid
        sample = self.mid_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples , self.controlnet_down_blocks ):
            down_block_res_sample = controlnet_block(down_block_res_sample )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample )
        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples , mid_block_res_sample=mid_block_res_sample )
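# Minimal usage sketch (assumption, not part of the original file): instantiate the
# module, initialise parameters from a PRNG key, then apply it to dummy NCHW inputs.
#
#   controlnet = FlaxControlNetModel(sample_size=32)
#   params = controlnet.init_weights(jax.random.PRNGKey(0))
#   out = controlnet.apply(
#       {"params": params},
#       jnp.zeros((1, 4, 32, 32)),          # latent sample
#       jnp.ones((1,), dtype=jnp.int32),    # timesteps
#       jnp.zeros((1, 1, 1280)),            # encoder hidden states
#       jnp.zeros((1, 3, 256, 256)),        # conditioning image (8x the latent size)
#   )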
| 318 | 1 |
"""simple docstring"""
def factorial(num: int ) -> int:
    """simple docstring"""
    fact = 1
    for i in range(1 , num + 1 ):
        fact *= i
    return fact
def split_and_add(number: int ) -> int:
    """simple docstring"""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits
def solution(num: int = 100 ) -> int:
    """simple docstring"""
    nfact = factorial(num )
    result = split_and_add(nfact )
    return result
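# Quick sanity check (illustrative, not in the original file):
# factorial(10) == 3628800 and split_and_add(3628800) == 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27,
# so solution(10) == 27. Project Euler problem 20 asks for solution(100).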
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 711 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int ) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
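# Worked example (illustrative): for number = 97, math.sqrt(97) ~ 9.8, so the loop
# only tests i = 5 (97 % 5 == 2, 97 % 7 == 6); no divisor is found and 97 is prime.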
odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]
def compute_nums(n: int ) -> list[int]:
    """simple docstring"""
    if not isinstance(n , int ):
        raise ValueError("""n must be an integer""" )
    if n <= 0:
        raise ValueError("""n must be >= 0""" )
    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
        if len(list_nums ) == n:
            return list_nums
    return []
def solution() -> int:
    """simple docstring"""
    return compute_nums(1 )[0]
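# For reference (Project Euler 46): the smallest odd composite that cannot be written
# as a prime plus twice a square is 5777, so solution() returns 5777.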
if __name__ == "__main__":
print(F'''{solution() = }''')
| 28 | 0 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r'''\s+''')
def get_hash(example ):
    '''simple docstring'''
    return {"hash": hashlib.md5(re.sub(PATTERN , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def line_stats(example ):
    '''simple docstring'''
    line_lengths = [len(line ) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def alpha_stats(example ):
    '''simple docstring'''
    alpha_frac = np.mean([c.isalnum() for c in example["content"]] )
    return {"alpha_frac": alpha_frac}
def check_uniques(example , uniques ):
    '''simple docstring'''
    if example["hash"] in uniques:
        uniques.remove(example["hash"] )
        return True
    else:
        return False
def is_autogenerated(example , scan_width=5 ):
    '''simple docstring'''
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example , scan_width=5 , coeff=0.05 ):
    '''simple docstring'''
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n" )
    threshold = int(coeff * nlines )
    for line in lines:
        count_config += line.lower().count("config" )
        count_test += line.lower().count("test" )
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example ):
    '''simple docstring'''
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example , minimum=4 ):
    '''simple docstring'''
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=" )
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example ):
    '''simple docstring'''
    input_ids = tokenizer(example["content"] , truncation=False )["input_ids"]
    ratio = len(example["content"] ) / len(input_ids )
    return {"ratio": ratio}
def preprocess(example ):
    '''simple docstring'''
    results = {}
    results.update(get_hash(example ) )
    results.update(line_stats(example ) )
    results.update(alpha_stats(example ) )
    results.update(char_token_ratio(example ) )
    results.update(is_autogenerated(example ) )
    results.update(is_config_or_test(example ) )
    results.update(has_no_keywords(example ) )
    results.update(has_few_assignments(example ) )
    return results
def filter(example , uniques , args ):
    '''simple docstring'''
    if not check_uniques(example , uniques ):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def compress_file(file_path ):
    '''simple docstring'''
    with open(file_path , "rb" ) as f_in:
        with gzip.open(str(file_path ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in , f_out )
    os.unlink(file_path )
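# e.g. compress_file("data/file-000000000001.json") writes file-000000000001.json.gz
# next to the input and then deletes the uncompressed original.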
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(f"Time to load dataset: {time.time()-t_start:.2f}")
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")
# Deduplicate hashes
uniques = set(ds.unique('''hash'''))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicated dataset: {len(ds_filter)}")
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
        json.dump(duplicate_clusters, f)
data_dir = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| 663 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    '''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_mobilevit'''] = ['''MobileViTFeatureExtractor''']
    _import_structure['''image_processing_mobilevit'''] = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mobilevit'''] = [
        '''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MobileViTForImageClassification''',
        '''MobileViTForSemanticSegmentation''',
        '''MobileViTModel''',
        '''MobileViTPreTrainedModel''',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_mobilevit'''] = [
        '''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFMobileViTForImageClassification''',
        '''TFMobileViTForSemanticSegmentation''',
        '''TFMobileViTModel''',
        '''TFMobileViTPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 663 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key(state_dict , old , new ):
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys(state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
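# e.g. "backbone.0.body.layer1.0.conv1.weight" is renamed to
# "backbone.conv_encoder.model.layer1.0.conv1.weight"; all other keys pass through.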
def read_in_q_k_v(state_dict , is_panoptic=False ):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""encoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
        state_dict[F"""encoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
        state_dict[F"""encoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
        state_dict[F"""encoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
        state_dict[F"""encoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
        state_dict[F"""encoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
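# Shape note (illustrative): with d_model = 256, in_proj_weight is (768, 256); the
# three 256-row slices above become the separate q/k/v projection matrices.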
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name , pytorch_dump_folder_path ):
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format )
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    logger.info(F"""Converting model {model_name}...""" )
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR" , model_name , pretrained=True ).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict , src , dest )
    state_dict = rename_backbone_keys(state_dict )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict , is_panoptic=is_panoptic )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr" )
                and not key.startswith("class_labels_classifier" )
                and not key.startswith("bbox_predictor" )
            ):
                val = state_dict.pop(key )
                state_dict["conditional_detr.model" + key[len("conditional_detr" ):]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key )
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
                continue
            else:
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config ) if is_panoptic else ConditionalDetrForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    model.push_to_hub(repo_id=model_name , organization="DepuMeng" , commit_message="Add model" )
    # verify our conversion
    original_outputs = conditional_detr(pixel_values )
    outputs = model(pixel_values )
    assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
    # Save model and image processor
    logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 714 |
from __future__ import annotations
def depth_first_search(graph: dict , start: str ) -> set[str]:
    """simple docstring"""
    explored , stack = set(start ), [start]
    while stack:
        v = stack.pop()
        explored.add(v )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(adj )
    return explored
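# Illustrative trace (not in the original file): on the graph G below,
# depth_first_search(G, "A") first visits vertices in the order A, B, D, E, F, C, G
# and returns the full set {"A", "B", "C", "D", "E", "F", "G"}.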
G = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
| 328 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    """simple docstring"""
    def __init__( self , parent , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = '''last'''
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.float32 )
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] , 2 , dtype=tf.float32 )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = FlaubertConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]], dtype=tf.int32
        )  # "J'aime flaubert !"
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
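The `ids_tensor` and `random_attention_mask` helpers above come from the shared TF test utilities. A minimal sketch of what a helper like `ids_tensor` does — illustrative only; the real implementation lives in `test_modeling_tf_common` and handles more cases:

```python
import random

import tensorflow as tf


def ids_tensor_sketch(shape, vocab_size, dtype=None):
    # Draw independent uniform token ids in [0, vocab_size) and reshape to `shape`.
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = [random.randint(0, vocab_size - 1) for _ in range(total_dims)]
    return tf.constant(values, shape=shape, dtype=dtype if dtype is not None else tf.int32)
```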
| 658 |
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight
def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
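A quick usage sketch for the two helpers above (menu values are illustrative, not from the source):

```python
names = ["Burger", "Pizza", "Coke"]
values = [80.0, 100.0, 30.0]
weights = [40.0, 60.0, 10.0]
foods = build_menu(names, values, weights)
# Greedy by raw value, with a weight budget of 60; Things.get_value is the key function.
chosen, total_value = greedy(foods, 60.0, Things.get_value)
print(chosen, total_value)  # picks the single highest-value item that fits
```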
| 119 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        processor = Blip2Processor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images for testing."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
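The class above exercises the standard processor contract: text goes through the tokenizer, images through the image processor, and one call merges both outputs. A minimal usage sketch (the checkpoint name is a real public one but is shown only for illustration; the call downloads weights):

```python
import numpy as np
from PIL import Image
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(images=image, text="a photo of", return_tensors="pt")
# pixel_values come from the image processor; input_ids/attention_mask from the tokenizer
print(sorted(inputs.keys()))
```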
| 169 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True if number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions given as numerator/denominator pairs, reduced to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
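A small sanity check for `add_three` (illustrative, not part of the original solution): 1/2 + 1/3 + 1/6 reduces to 1/1.

```python
assert add_three(1, 2, 1, 3, 1, 6) == (1, 1)  # 1/2 + 1/3 + 1/6 == 1
```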
def solution(order: int = 35) -> int:
    """Collect the unique reduced sums x + y + z over the search space and total them."""
    unique_s: set = set()
    total: Fraction = Fraction(0)
    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
if __name__ == "__main__":
print(f"{solution() = }")
| 169 | 1 |
def heaps(arr: list) -> list:
    """Generate all permutations of arr using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
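As a sanity check, Heap's algorithm must yield exactly the permutation set the standard library produces; only the ordering differs, hence the sort (illustrative snippet, not in the original file):

```python
from itertools import permutations

assert sorted(heaps([1, 2, 3])) == sorted(permutations([1, 2, 3]))
```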
| 54 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING
def _get_default_logging_level():
    """
    If the DATASETS_VERBOSITY env var is set to one of the valid choices, return it; otherwise the default level.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
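End to end, the module above is used like this (a sketch; `DATASETS_VERBOSITY` takes the keys of `log_levels`):

```python
logger = get_logger(__name__)   # child of the library root logger
set_verbosity(WARNING)          # or set_verbosity_info() / set_verbosity_error()
logger.warning("visible at WARNING and below")

disable_progress_bar()          # tqdm(...) now returns no-op EmptyTqdm objects
for _ in tqdm(range(3)):
    pass                        # iterates silently, no bar rendered
enable_progress_bar()
```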
| 279 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
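The slicing above undoes timm's fused attention projection: rows `[0, H)` of the `3H x H` qkv matrix are the query weights, `[H, 2H)` the keys, and `[2H, 3H)` the values. A standalone illustration with toy sizes (not part of the conversion script):

```python
import torch

hidden_size = 4
qkv_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
query_w = qkv_weight[:hidden_size, :]                 # first H rows
key_w = qkv_weight[hidden_size : 2 * hidden_size, :]  # middle H rows
value_w = qkv_weight[-hidden_size:, :]                # last H rows
assert torch.equal(torch.cat([query_w, key_w, value_w]), qkv_weight)
```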
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm DeiT checkpoint into the HuggingFace DeiT structure."""
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 103 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 103 | 1 |
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave the characters of two strings; leftover characters are appended at the end."""
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = first_str_length if first_str_length > second_str_length else second_str_length
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
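The same interleave can be expressed with the standard library (equivalent behavior; shown only for comparison):

```python
from itertools import chain, zip_longest


def alternative_string_arrange_stdlib(first_str: str, second_str: str) -> str:
    # zip_longest pads the shorter string with "", which join drops again.
    return "".join(chain.from_iterable(zip_longest(first_str, second_str, fillvalue="")))
```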
| 328 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir')
    if "full:README.md" in files:
        with open(dataset_infos_dir / 'README.md', 'w') as f:
            f.write('---\ndataset_info:\n  dataset_size: 42\n---')
    if "empty:README.md" in files:
        with open(dataset_infos_dir / 'README.md', 'w') as f:
            f.write('')
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / 'dataset_infos.json', 'w') as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(dataset_info: DatasetInfo, tmp_path):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, 'dataset_info.json'))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description='foo',
        citation='bar',
        homepage='https://foo.bar',
        license='CC0',
        features=Features({'a': Value('int32')}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name='builder',
        config_name='config',
        version='1.0.0',
        splits=[{'name': 'train', 'num_examples': 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, 'README.md'))
| 328 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 701 |
"""simple docstring"""
import operator as op
def _lowerCAmelCase ( lowerCamelCase__ : Tuple ) -> List[str]:
_SCREAMING_SNAKE_CASE : Optional[int] = []
_SCREAMING_SNAKE_CASE : str = lambda lowerCamelCase__, lowerCamelCase__ : int(x / y ) # noqa: E731 integer division operation
_SCREAMING_SNAKE_CASE : Any = {
"^": op.pow,
"*": op.mul,
"/": div,
"+": op.add,
"-": op.sub,
} # operators & their respective operation
# print table header
print("Symbol".center(8 ), "Action".center(1_2 ), "Stack", sep=" | " )
print("-" * (3_0 + len(lowerCamelCase__ )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(lowerCamelCase__ ) # append x to stack
# output in tabular format
print(x.rjust(8 ), ("push(" + x + ")").ljust(1_2 ), ",".join(lowerCamelCase__ ), sep=" | " )
else:
_SCREAMING_SNAKE_CASE : Dict = stack.pop() # pop stack
# output in tabular format
print("".rjust(8 ), ("pop(" + b + ")").ljust(1_2 ), ",".join(lowerCamelCase__ ), sep=" | " )
_SCREAMING_SNAKE_CASE : Any = stack.pop() # pop stack
# output in tabular format
print("".rjust(8 ), ("pop(" + a + ")").ljust(1_2 ), ",".join(lowerCamelCase__ ), sep=" | " )
stack.append(
str(opr[x](int(lowerCamelCase__ ), int(lowerCamelCase__ ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ), ("push(" + a + x + b + ")").ljust(1_2 ), ",".join(lowerCamelCase__ ), sep=" | ", )
return int(stack[0] )
if __name__ == "__main__":
lowercase_ : int = input('''\n\nEnter a Postfix Equation (space separated) = ''').split(''' ''')
print('''\n\tResult = ''', solve(Postfix))
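An illustrative call (not in the original script): the postfix form of (5 + 6) * 7 is `5 6 + 7 *`, which `solve` reduces to 77.

```python
assert solve("5 6 + 7 *".split(" ")) == 77  # also prints the step-by-step trace table
```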
| 295 | 0 |
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

| 219 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
    @require_torch
    def test_small_model_pt(self):
        import torch

        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )
        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )
        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )
        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )
        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]

    def test_stop_sequence_stopping_criteria(self):
        prompt = """Hello I believe in"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )
        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))
        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])
        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])
        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))
        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )
        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)
        # Empty prompt is slightly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")
        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return
        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10_000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)
            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)
    def test_pipeline_length_setting_warning(self):
        prompt = """Hello world"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test
        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)
        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)

| 219 | 1 |
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR: each character of the input is one token."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
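A character-level round trip with the tokenizer above — a sketch with a toy vocab; the exact constructor behavior may vary across transformers versions:

```python
import json
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump({"[GO]": 0, "[s]": 1, "a": 2, "b": 3}, f)  # toy vocab, illustrative only

tokenizer = MgpstrTokenizer(f.name)
assert tokenizer._tokenize("ab") == ["a", "b"]           # one token per character
assert tokenizer._convert_token_to_id("z") == 0          # unknown falls back to "[GO]"
```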
| 719 |
from functools import reduce

N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the string n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(f'''{solution() = }''')
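The `reduce` over digit characters multiplies each 13-wide window; an equivalent form with `math.prod` (same windows, same result; shown only for clarity):

```python
from math import prod


def solution_prod(n: str = N) -> int:
    # Slide a width-13 window over the digit string and take the best product.
    return max(prod(int(d) for d in n[i : i + 13]) for i in range(len(n) - 12))
```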
| 532 | 0 |
"""simple docstring"""
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class a ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_ ) -> Dict:
super().__init__()
self.register_modules(unet=snake_case_ , scheduler=snake_case_ )
@torch.no_grad()
def __call__( self , snake_case_ = 1 , snake_case_ = None , snake_case_ = 50 , snake_case_ = "pil" , snake_case_ = True , **snake_case_ , ) -> Union[ImagePipelineOutput, Tuple]:
_UpperCAmelCase = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=snake_case_ , )
_UpperCAmelCase = image.to(self.device )
# set step values
self.scheduler.set_timesteps(snake_case_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_UpperCAmelCase = self.unet(snake_case_ , snake_case_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
_UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(snake_case_ )
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=snake_case_ ), "This is a local test"
| 426 |
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
SCREAMING_SNAKE_CASE_ = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def A__ ( ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = os.path.dirname(os.path.realpath(A__ ) )
_UpperCAmelCase = os.path.join(A__ , "words.txt" )
_UpperCAmelCase = ""
with open(A__ ) as f:
_UpperCAmelCase = f.readline()
_UpperCAmelCase = [word.strip("\"" ) for word in words.strip("\r\n" ).split("," )]
_UpperCAmelCase = [
word
for word in [sum(ord(A__ ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(A__ )
if __name__ == "__main__":
print(solution())
| 426 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCAmelCase = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
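# Minimal illustration of the idea behind _LazyModule (a sketch, not the
# actual implementation): submodule attributes resolve on first access, so
# importing the package stays cheap until e.g. LongT5Model is touched.
#   import importlib, types
#   class _LazyDemo(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._name_to_module = {n: m for m, ns in import_structure.items() for n in ns}
#       def __getattr__(self, item):
#           module = importlib.import_module("." + self._name_to_module[item], self.__name__)
#           return getattr(module, item)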
| 717 |
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self ) -> None:
        '''simple docstring'''
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0
    def is_empty(self ) -> bool:
        '''simple docstring'''
        return self.head == self.tail
    def push(self , data: Any ) -> None:
        '''simple docstring'''
        self.data.append(data )
        self.tail = self.tail + 1
    def pop(self ) -> Any:
        '''simple docstring'''
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret
    def count(self ) -> int:
        '''simple docstring'''
        return self.tail - self.head
    def print_queue(self ) -> None:
        '''simple docstring'''
        print(self.data )
        print("**************" )
        print(self.data[self.head : self.tail] )
class MyNode:
    def __init__(self , data: Any ) -> None:
        '''simple docstring'''
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1
    def get_data(self ) -> Any:
        '''simple docstring'''
        return self.data
    def get_left(self ) -> MyNode | None:
        '''simple docstring'''
        return self.left
    def get_right(self ) -> MyNode | None:
        '''simple docstring'''
        return self.right
    def get_height(self ) -> int:
        '''simple docstring'''
        return self.height
    def set_data(self , data: Any ) -> None:
        '''simple docstring'''
        self.data = data
    def set_left(self , node: MyNode | None ) -> None:
        '''simple docstring'''
        self.left = node
    def set_right(self , node: MyNode | None ) -> None:
        '''simple docstring'''
        self.right = node
    def set_height(self , height: int ) -> None:
        '''simple docstring'''
        self.height = height
def get_height(node: MyNode | None ) -> int:
    if node is None:
        return 0
    return node.get_height()
def my_max(a: int , b: int ) -> int:
    if a > b:
        return a
    return b
def right_rotation(node: MyNode ) -> MyNode:
    print("left rotation node:" , node.get_data() )
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right() )
    ret.set_right(node )
    h1 = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(h1 )
    h2 = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
    ret.set_height(h2 )
    return ret
def left_rotation(node: MyNode ) -> MyNode:
    print("right rotation node:" , node.get_data() )
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left() )
    ret.set_left(node )
    h1 = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(h1 )
    h2 = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
    ret.set_height(h2 )
    return ret
def lr_rotation(node: MyNode ) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child ) )
    return right_rotation(node )
def rl_rotation(node: MyNode ) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child ) )
    return left_rotation(node )
def insert_node(node: MyNode | None , data: Any ) -> MyNode | None:
    if node is None:
        return MyNode(data )
    if data < node.get_data():
        node.set_left(insert_node(node.get_left() , data ) )
        if (
            get_height(node.get_left() ) - get_height(node.get_right() ) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node )
            else:
                node = lr_rotation(node )
    else:
        node.set_right(insert_node(node.get_right() , data ) )
        if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node )
            else:
                node = left_rotation(node )
    h1 = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(h1 )
    return node
def get_right_most(root: MyNode ) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()
def get_left_most(root: MyNode ) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode , data: Any ) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child )
            root.set_data(temp_data )
            root.set_right(del_node(right_child , temp_data ) )
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data" )
            return root
        else:
            root.set_left(del_node(left_child , data ) )
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child , data ) )
    if get_height(right_child ) - get_height(left_child ) == 2:
        assert right_child is not None
        if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
            root = left_rotation(root )
        else:
            root = rl_rotation(root )
    elif get_height(right_child ) - get_height(left_child ) == -2:
        assert left_child is not None
        if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
            root = right_rotation(root )
        else:
            root = lr_rotation(root )
    height = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
    root.set_height(height )
    return root
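# A small balance-check helper (a sketch, not part of the original module;
# assumes the MyNode class and get_height defined above): every AVL node
# must keep the height difference of its two subtrees within 1.
def is_balanced(node: MyNode | None ) -> bool:
    if node is None:
        return True
    return (
        abs(get_height(node.get_left() ) - get_height(node.get_right() ) ) <= 1
        and is_balanced(node.get_left() )
        and is_balanced(node.get_right() )
    )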
class AVLtree:
    def __init__(self ) -> None:
        '''simple docstring'''
        self.root: MyNode | None = None
    def get_height(self ) -> int:
        '''simple docstring'''
        return get_height(self.root )
    def insert(self , data: Any ) -> None:
        '''simple docstring'''
        print("insert:" + str(data ) )
        self.root = insert_node(self.root , data )
    def del_node(self , data: Any ) -> None:
        '''simple docstring'''
        print("delete:" + str(data ) )
        if self.root is None:
            print("Tree is empty!" )
            return
        self.root = del_node(self.root , data )
    def __str__(self , ) -> str:  # a level traversal, gives a more intuitive look on the tree
        '''simple docstring'''
        output = ""
        q = MyQueue()
        q.push(self.root )
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2 , layer - 1 ) )
            output += space
            if node is None:
                output += "*"
                q.push(None )
                q.push(None )
            else:
                output += str(node.get_data() )
                q.push(node.get_left() )
                q.push(node.get_right() )
            output += space
            cnt = cnt + 1
            for i in range(100 ):
                if cnt == math.pow(2 , i ) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
    import doctest
    doctest.testmod()
if __name__ == "__main__":
_test()
    t = AVLtree()
    lst = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 36 | 0 |
class Node:
    def __init__(self , name , val ) -> None:
        self.name = name
        self.val = val
    def __str__(self ) -> str:
        return F"""{self.__class__.__name__}({self.name}, {self.val})"""
    def __lt__(self , other ) -> bool:
        return self.val < other.val
class MinHeap:
    def __init__(self , array ) -> None:
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array )
    def __getitem__(self , key ):
        return self.get_value(key )
    def get_parent_idx(self , idx ):
        return (idx - 1) // 2
    def get_left_child_idx(self , idx ):
        return idx * 2 + 1
    def get_right_child_idx(self , idx ):
        return idx * 2 + 2
    def get_value(self , key ):
        return self.heap_dict[key]
    def build_heap(self , array ):
        last_idx = len(array ) - 1
        start_from = self.get_parent_idx(last_idx )
        for idx, i in enumerate(array ):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from , -1 , -1 ):
            self.sift_down(i , array )
        return array
    def sift_down(self , idx , array ):
        while True:
            l = self.get_left_child_idx(idx )  # noqa: E741
            r = self.get_right_child_idx(idx )
            smallest = idx
            if l < len(array ) and array[l] < array[idx]:
                smallest = l
            if r < len(array ) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break
    def sift_up(self , idx ):
        p = self.get_parent_idx(idx )
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx )
    def peek(self ):
        return self.heap[0]
    def remove(self ):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0 , self.heap )
        return x
    def insert(self , node ):
        self.heap.append(node )
        self.idx_of_element[node] = len(self.heap ) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap ) - 1 )
    def is_empty(self ):
        return len(self.heap ) == 0
    def decrease_key(self , node , new_value ):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than the current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node] )
r = Node('R', -1)
b = Node('B', 6)
a = Node('A', 3)
x = Node('X', 1)
e = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
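# Further usage sketch: after the decrease_key call above, repeatedly
# popping the minimum drains the heap in ascending val order, starting
# with B(-17):
#   while not my_min_heap.is_empty():
#       print(my_min_heap.remove())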
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
def find_min(arr: list[int] ) -> int:
    """Return the minimum difference between the sums of two subsets of arr."""
    n = len(arr )
    s = sum(arr )
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(1 , n + 1 ):
        dp[i][0] = True
    for i in range(1 , s + 1 ):
        dp[0][i] = False
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            dp[i][j] = dp[i - 1][j]  # skip arr[i - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # take arr[i - 1]
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
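# Example (illustrative): find_min([1, 6, 11, 5]) returns 1, since the
# total 23 splits best into {1, 5, 6} = 12 and {11} = 11.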
| 487 | 0 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args() -> argparse.Namespace:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_ckpt' , type=str , default='microsoft/unixcoder-base-nine' )
    parser.add_argument('--num_epochs' , type=int , default=5 )
    parser.add_argument('--batch_size' , type=int , default=6 )
    parser.add_argument('--gradient_accumulation_steps' , type=int , default=1 )
    parser.add_argument('--freeze' , type=bool , default=True )
    parser.add_argument('--learning_rate' , type=float , default=5E-4 )
    parser.add_argument('--seed' , type=int , default=0 )
    parser.add_argument('--lr_scheduler_type' , type=str , default='cosine' )
    parser.add_argument('--num_warmup_steps' , type=int , default=1_0 )
    parser.add_argument('--weight_decay' , type=float , default=0.01 )
    parser.add_argument('--output_dir' , type=str , default='./results' )
    return parser.parse_args()
__a : Optional[int] = load("""accuracy""")
def compute_metrics(eval_pred ) -> dict:
    '''simple docstring'''
    predictions, labels = eval_pred
    predictions = np.argmax(predictions , axis=1 )
    return metric.compute(predictions=predictions , references=labels )
class CustomCallback(TrainerCallback ):
    def __init__(self , trainer ) -> None:
        """simple docstring"""
        super().__init__()
        self._trainer = trainer
    def on_epoch_end(self , args , state , control , **kwargs ):
        """simple docstring"""
        if control.should_evaluate:
            control_copy = deepcopy(control )
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='train' )
            return control_copy
def main() -> None:
    '''simple docstring'''
    args = get_args()
    set_seed(args.seed )
    dataset = load_dataset('codeparrot/codecomplex' , split='train' )
    train_test = dataset.train_test_split(test_size=0.2 )
    test_validation = train_test['test'].train_test_split(test_size=0.5 )
    train_test_validation = DatasetDict(
        {
            'train': train_test['train'],
            'test': test_validation['train'],
            'valid': test_validation['test'],
        } )
    print('Loading tokenizer and model' )
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
    model.config.pad_token_id = model.config.eos_token_id
    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False
    labels = ClassLabel(num_classes=7 , names=list(set(train_test_validation['train']['complexity'] ) ) )
    def tokenize(example ):
        inputs = tokenizer(example['src'] , truncation=True , max_length=1_0_2_4 )
        label = labels.str2int(example['complexity'] )
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }
    tokenized_datasets = train_test_validation.map(
        tokenize , batched=True , remove_columns=train_test_validation['train'].column_names , )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer )
    training_args = TrainingArguments(
        output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='epoch' , save_strategy='epoch' , logging_strategy='epoch' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='accuracy' , run_name='complexity-java' , report_to='wandb' , )
    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets['train'] , eval_dataset=tokenized_datasets['valid'] , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
    print('Training...' )
    trainer.add_callback(CustomCallback(trainer ) )
    trainer.train()
if __name__ == "__main__":
main()
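# Example invocation (illustrative; the script file name is hypothetical,
# the flags match the argparse defaults above):
#   python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine \
#       --num_epochs 5 --batch_size 6 --freeze True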
| 559 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
def __init__( self : List[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any]=13 , __UpperCAmelCase : str=7 , __UpperCAmelCase : Dict=True , __UpperCAmelCase : str=True , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : Dict=True , __UpperCAmelCase : Any=99 , __UpperCAmelCase : Union[str, Any]=16 , __UpperCAmelCase : Union[str, Any]=36 , __UpperCAmelCase : Optional[int]=6 , __UpperCAmelCase : Union[str, Any]=6 , __UpperCAmelCase : List[str]=6 , __UpperCAmelCase : Union[str, Any]=37 , __UpperCAmelCase : Dict="gelu" , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : Dict=512 , __UpperCAmelCase : List[Any]=16 , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : Any=3 , __UpperCAmelCase : Optional[int]=4 , __UpperCAmelCase : Optional[Any]=None , ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_input_mask
UpperCamelCase_ = use_token_type_ids
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = embedding_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_hidden_groups
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = type_vocab_size
UpperCamelCase_ = type_sequence_label_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = num_labels
UpperCamelCase_ = num_choices
UpperCamelCase_ = scope
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = None
if self.use_input_mask:
UpperCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_ = None
if self.use_token_type_ids:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase_ = None
UpperCamelCase_ = None
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowercase__ ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = AlbertModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
UpperCamelCase_ = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
UpperCamelCase_ = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase__ ( self : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] ) -> int:
"""simple docstring"""
UpperCamelCase_ = AlbertForPreTraining(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , sentence_order_label=__UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowercase__ ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = AlbertForMaskedLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = AlbertForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = AlbertForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : Any , __UpperCAmelCase : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = AlbertForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = self.num_choices
UpperCamelCase_ = AlbertForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCamelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class A ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE : Union[str, Any] = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE : int = True
def lowercase__ ( self : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : int=False ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class in get_values(__UpperCAmelCase ):
UpperCamelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__UpperCAmelCase )
UpperCamelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
return inputs_dict
def lowercase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = AlbertModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def lowercase__ ( self : str ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCAmelCase )
def lowercase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase )
def lowercase__ ( self : Dict ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
def lowercase__ ( self : Any ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def lowercase__ ( self : int ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase_ = type
self.model_tester.create_and_check_model(*__UpperCAmelCase )
@slow
def lowercase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ = AlbertModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_torch
class A ( unittest.TestCase ):
@slow
def lowercase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = AlbertModel.from_pretrained('albert-base-v2' )
UpperCamelCase_ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
UpperCamelCase_ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCamelCase_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0]
UpperCamelCase_ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __UpperCAmelCase )
UpperCamelCase_ = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __UpperCAmelCase , atol=1E-4 ) )
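        # Note: the integration check above compares only a 3x3 slice of the
        # hidden states against hard-coded reference values with atol=1e-4,
        # which catches numerical drift without storing the full (1, 11, 768)
        # tensor in the test file.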
| 559 | 1 |
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser(
description=(
'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
lowercase_ = parser.parse_args()
    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = 'roberta'
    elif args.model_type == "gpt2":
        model = GPTaLMHeadModel.from_pretrained(args.model_name)
        prefix = 'transformer'
    state_dict = model.state_dict()
    compressed_sd = {}
    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]
    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1
    # Language Modeling Head #
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd['lm_head.weight'] = state_dict['lm_head.weight']
print(f"N layers selected for distillation: {std_idx}")
print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")
print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
torch.save(compressed_sd, args.dump_checkpoint)
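# Note (illustrative): with teacher layers [0, 2, 4, 7, 9, 11] the loop
# above ends with std_idx == 6, i.e. the extracted student checkpoint has
# six transformer blocks initialized from alternating teacher layers.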
| 291 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'
class TextQuestionAnsweringToolTester(unittest.TestCase , ToolTesterMixin ):
    def setUp(self ):
        '''simple docstring'''
        self.tool = load_tool('text-question-answering' )
        self.tool.setup()
        self.remote_tool = load_tool('text-question-answering' , remote=True )
    def test_exact_match_arg(self ):
        '''simple docstring'''
        result = self.tool(TEXT , 'What did Hugging Face do in April 2021?' )
        self.assertEqual(result , 'launched the BigScience Research Workshop' )
    def test_exact_match_arg_remote(self ):
        '''simple docstring'''
        result = self.remote_tool(TEXT , 'What did Hugging Face do in April 2021?' )
        self.assertEqual(result , 'launched the BigScience Research Workshop' )
    def test_exact_match_kwarg(self ):
        '''simple docstring'''
        result = self.tool(text=TEXT , question='What did Hugging Face do in April 2021?' )
        self.assertEqual(result , 'launched the BigScience Research Workshop' )
    def test_exact_match_kwarg_remote(self ):
        '''simple docstring'''
        result = self.remote_tool(text=TEXT , question='What did Hugging Face do in April 2021?' )
        self.assertEqual(result , 'launched the BigScience Research Workshop' )
| 291 | 1 |
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    '''simple docstring'''
    name: str
    pip_package: str = None
    @staticmethod
    def is_available():
        raise NotImplementedError
    def run(self ,trainer ,n_trials ,direction ,**kwargs ):
        raise NotImplementedError
    def default_hp_space(self ,trial ):
        raise NotImplementedError
    def ensure_available(self ):
        if not self.is_available():
            raise RuntimeError(
                F'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
    @classmethod
    def pip_install(cls ):
        return F'''`pip install {cls.pip_package or cls.name}`'''
class OptunaBackend(HyperParamSearchBackendBase ):
    '''simple docstring'''
    name = 'optuna'
    @staticmethod
    def is_available():
        return is_optuna_available()
    def run(self ,trainer ,n_trials ,direction ,**kwargs ):
        return run_hp_search_optuna(trainer ,n_trials ,direction ,**kwargs )
    def default_hp_space(self ,trial ):
        return default_hp_space_optuna(trial )
class RayTuneBackend(HyperParamSearchBackendBase ):
    '''simple docstring'''
    name = 'ray'
    pip_package = '\'ray[tune]\''
    @staticmethod
    def is_available():
        return is_ray_available()
    def run(self ,trainer ,n_trials ,direction ,**kwargs ):
        return run_hp_search_ray(trainer ,n_trials ,direction ,**kwargs )
    def default_hp_space(self ,trial ):
        return default_hp_space_ray(trial )
class SigOptBackend(HyperParamSearchBackendBase ):
    '''simple docstring'''
    name = 'sigopt'
    @staticmethod
    def is_available():
        return is_sigopt_available()
    def run(self ,trainer ,n_trials ,direction ,**kwargs ):
        return run_hp_search_sigopt(trainer ,n_trials ,direction ,**kwargs )
    def default_hp_space(self ,trial ):
        return default_hp_space_sigopt(trial )
class WandbBackend(HyperParamSearchBackendBase ):
    '''simple docstring'''
    name = 'wandb'
    @staticmethod
    def is_available():
        return is_wandb_available()
    def run(self ,trainer ,n_trials ,direction ,**kwargs ):
        return run_hp_search_wandb(trainer ,n_trials ,direction ,**kwargs )
    def default_hp_space(self ,trial ):
        return default_hp_space_wandb(trial )
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends ) > 0:
        name = available_backends[0].name
        if len(available_backends ) > 1:
            logger.info(
                F'''{len(available_backends )} hyperparameter search backends available. Using {name} as the default.''' )
        return name
    raise RuntimeError(
        """No hyperparameter search backend available.\n"""
        + """\n""".join(
            F''' - To install {backend.name} run {backend.pip_install()}'''
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
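# Usage sketch: with optuna installed, default_hp_search_backend() returns
# "optuna", and Trainer.hyperparameter_search(backend="optuna", n_trials=...)
# dispatches through the ALL_HYPERPARAMETER_SEARCH_BACKENDS mapping above.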
| 701 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def compute_nums(n: int ) -> list[int]:
    if not isinstance(n , int ):
        raise ValueError("""n must be an integer""" )
    if n <= 0:
        raise ValueError("""n must be >= 0""" )
    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
        if len(list_nums ) == n:
            return list_nums
    return []
def solution() -> int:
    return compute_nums(1 )[0]
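# compute_nums(1)[0] walks the odd composites in ascending order; the first
# one with no prime + 2*i*i decomposition is 5777, the value printed below
# (the first counterexample to Goldbach's other conjecture).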
if __name__ == "__main__":
print(F'{solution() = }')
| 9 | 0 |
def solution(n: int = 100 ) -> int:
    '''simple docstring'''
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
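# Worked example: for n = 10 the square of the sum is 55 ** 2 = 3025 and the
# sum of squares is 385, so solution(10) == 2640.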
if __name__ == "__main__":
print(F"{solution() = }")
| 321 |
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class _a (_lowerCamelCase):
"""simple docstring"""
def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=True , A__=True , A__=True , A__=99 , A__=32 , A__=5 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=5_12 , A__=16 , A__=2 , A__=0.02 , A__=False , A__=True , A__="None" , A__=3 , A__=4 , A__=None , ) -> int:
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_input_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = num_choices
_SCREAMING_SNAKE_CASE = relative_attention
_SCREAMING_SNAKE_CASE = position_biased_input
_SCREAMING_SNAKE_CASE = pos_att_type
_SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self ) -> Dict:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self ) -> Optional[int]:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCamelCase ( self , A__ ) -> List[str]:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> List[str]:
_SCREAMING_SNAKE_CASE = DebertaVaModel(config=A__ )
model.to(A__ )
model.eval()
_SCREAMING_SNAKE_CASE = model(A__ , attention_mask=A__ , token_type_ids=A__ )[0]
_SCREAMING_SNAKE_CASE = model(A__ , token_type_ids=A__ )[0]
_SCREAMING_SNAKE_CASE = model(A__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Tuple:
_SCREAMING_SNAKE_CASE = DebertaVaForMaskedLM(config=A__ )
model.to(A__ )
model.eval()
_SCREAMING_SNAKE_CASE = model(A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Tuple:
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = DebertaVaForSequenceClassification(A__ )
model.to(A__ )
model.eval()
_SCREAMING_SNAKE_CASE = model(A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(A__ )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = DebertaVaForTokenClassification(config=A__ )
model.to(A__ )
model.eval()
_SCREAMING_SNAKE_CASE = model(A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = DebertaVaForQuestionAnswering(config=A__ )
model.to(A__ )
model.eval()
_SCREAMING_SNAKE_CASE = model(
A__ , attention_mask=A__ , token_type_ids=A__ , start_positions=A__ , end_positions=A__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Any:
_SCREAMING_SNAKE_CASE = DebertaVaForMultipleChoice(config=A__ )
model.to(A__ )
model.eval()
_SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE = model(
A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class _a (_lowerCamelCase , _lowerCamelCase , unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = (
{
'feature-extraction': DebertaVaModel,
'fill-mask': DebertaVaForMaskedLM,
'question-answering': DebertaVaForQuestionAnswering,
'text-classification': DebertaVaForSequenceClassification,
'token-classification': DebertaVaForTokenClassification,
'zero-shot': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def UpperCamelCase ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = DebertaVaModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A__ , hidden_size=37 )
def UpperCamelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*A__ )
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*A__ )
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*A__ )
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*A__ )
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*A__ )
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*A__ )
@slow
def UpperCamelCase ( self ) -> Dict:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE = DebertaVaModel.from_pretrained(A__ )
self.assertIsNotNone(A__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class _a (unittest.TestCase):
"""simple docstring"""
@unittest.skip(reason="""Model not available yet""" )
def UpperCamelCase ( self ) -> Union[str, Any]:
pass
@slow
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
_SCREAMING_SNAKE_CASE = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
_SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(A__ , attention_mask=A__ )[0]
# compare the actual values for a slice.
_SCREAMING_SNAKE_CASE = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A__ , atol=1E-4 ) , F"{output[:, 1:4, 1:4]}" )
| 591 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def snake_case ( snake_case__ :Optional[int]) -> List[Any]:
_A = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
_A = [144, 192, 240]
_A = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
_A = [96, 120, 144]
_A = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
_A = [64, 80, 96]
_A = [16, 16, 24, 48, 64, 80, 320]
_A = 0.05
_A = 2.0
if mobilevit_name.startswith("""deeplabv3_"""):
_A = 512
_A = 16
_A = 21
_A = """pascal-voc-id2label.json"""
else:
_A = 1_000
_A = """imagenet-1k-id2label.json"""
_A = """huggingface/label-files"""
_A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""") , """r"""))
_A = {int(snake_case__): v for k, v in idalabel.items()}
_A = idalabel
_A = {v: k for k, v in idalabel.items()}
return config
def snake_case ( snake_case__ :Any , snake_case__ :Optional[Any]=False) -> List[Any]:
for i in range(1 , 6):
if F'''layer_{i}.''' in name:
_A = name.replace(F'''layer_{i}.''' , F'''encoder.layer.{i - 1}.''')
if "conv_1." in name:
_A = name.replace("""conv_1.""" , """conv_stem.""")
if ".block." in name:
_A = name.replace(""".block.""" , """.""")
if "exp_1x1" in name:
_A = name.replace("""exp_1x1""" , """expand_1x1""")
if "red_1x1" in name:
_A = name.replace("""red_1x1""" , """reduce_1x1""")
if ".local_rep.conv_3x3." in name:
_A = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""")
if ".local_rep.conv_1x1." in name:
_A = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""")
if ".norm." in name:
_A = name.replace(""".norm.""" , """.normalization.""")
if ".conv." in name:
_A = name.replace(""".conv.""" , """.convolution.""")
if ".conv_proj." in name:
_A = name.replace(""".conv_proj.""" , """.conv_projection.""")
for i in range(0 , 2):
for j in range(0 , 4):
if F'''.{i}.{j}.''' in name:
_A = name.replace(F'''.{i}.{j}.''' , F'''.{i}.layer.{j}.''')
for i in range(2 , 6):
for j in range(0 , 4):
if F'''.{i}.{j}.''' in name:
_A = name.replace(F'''.{i}.{j}.''' , F'''.{i}.''')
if "expand_1x1" in name:
_A = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""")
if "conv_3x3" in name:
_A = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""")
if "reduce_1x1" in name:
_A = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""")
for i in range(2 , 5):
if F'''.global_rep.{i}.weight''' in name:
_A = name.replace(F'''.global_rep.{i}.weight''' , """.layernorm.weight""")
if F'''.global_rep.{i}.bias''' in name:
_A = name.replace(F'''.global_rep.{i}.bias''' , """.layernorm.bias""")
if ".global_rep." in name:
_A = name.replace(""".global_rep.""" , """.transformer.""")
if ".pre_norm_mha.0." in name:
_A = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""")
if ".pre_norm_mha.1.out_proj." in name:
_A = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""")
if ".pre_norm_ffn.0." in name:
_A = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""")
if ".pre_norm_ffn.1." in name:
_A = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""")
if ".pre_norm_ffn.4." in name:
_A = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""")
if ".transformer." in name:
_A = name.replace(""".transformer.""" , """.transformer.layer.""")
if ".aspp_layer." in name:
_A = name.replace(""".aspp_layer.""" , """.""")
if ".aspp_pool." in name:
_A = name.replace(""".aspp_pool.""" , """.""")
if "seg_head." in name:
_A = name.replace("""seg_head.""" , """segmentation_head.""")
if "segmentation_head.classifier.classifier." in name:
_A = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""")
if "classifier.fc." in name:
_A = name.replace("""classifier.fc.""" , """classifier.""")
elif (not base_model) and ("segmentation_head." not in name):
_A = """mobilevit.""" + name
return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model=base_model)] = val

    return orig_state_dict
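

# Minimal sketch (added for illustration) of the fused-QKV split performed
# above: a fused attention projection of shape (3 * dim, dim) is cut into
# three (dim, dim) blocks for query, key and value. `dim` here is an
# arbitrary toy value, not a real MobileViT head size.
def _demo_qkv_split() -> None:
    dim = 4
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    query = fused[:dim, :]
    key = fused[dim : dim * 2, :]
    value = fused[-dim:, :]
    assert query.shape == key.shape == value.shape == (dim, dim)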
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--mobilevit_name",
        default="mobilevit_s",
        type=str,
        help=(
            "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
            " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
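

# Hedged programmatic equivalent of the CLI above (added for illustration).
# The checkpoint path below is a placeholder: it must point at an original
# Apple MobileViT state dict downloaded separately.
def _demo_convert() -> None:
    convert_movilevit_checkpoint(
        "mobilevit_s",
        "./mobilevit_s.pt",  # placeholder path
        "./converted",
        push_to_hub=False,
    )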
| 83 | from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area between the curve fnc and the x axis on
    [x_start, x_end] with the composite trapezoidal rule using `steps` segments."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of the curve as linear and solves
        # for the trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
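
    # Sanity check (added for illustration): for a non-negative integrand the
    # rule converges to the exact integral; the integral of x^2 from 0 to 5
    # is 125 / 3.
    approx = trapezoidal_area(lambda x: x * x, 0, 5, 10_000)
    assert abs(approx - 125 / 3) < 1e-3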
| 83 | 1 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of
    the squares of the first n natural numbers (Project Euler problem 6)."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
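
    # Worked example (added for clarity): for n = 10 the sum 1..10 is 55,
    # 55 ** 2 = 3025, the sum of squares is 385, and 3025 - 385 = 2640.
    assert solution(10) == 2640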
| 13 |
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=[1, 2, 1] , SCREAMING_SNAKE_CASE_=[2, 2, 4] , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=2.0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=["stage1", "stage2", "stage3"] , SCREAMING_SNAKE_CASE_=[1, 2, 3] , ) -> Any:
__lowerCamelCase : Optional[Any] = parent
__lowerCamelCase : int = batch_size
__lowerCamelCase : Optional[int] = image_size
__lowerCamelCase : Optional[int] = patch_size
__lowerCamelCase : Optional[Any] = num_channels
__lowerCamelCase : Dict = embed_dim
__lowerCamelCase : List[Any] = depths
__lowerCamelCase : int = num_heads
__lowerCamelCase : Optional[Any] = window_size
__lowerCamelCase : Optional[Any] = mlp_ratio
__lowerCamelCase : List[str] = qkv_bias
__lowerCamelCase : List[str] = hidden_dropout_prob
__lowerCamelCase : int = attention_probs_dropout_prob
__lowerCamelCase : List[Any] = drop_path_rate
__lowerCamelCase : Any = hidden_act
__lowerCamelCase : Union[str, Any] = use_absolute_embeddings
__lowerCamelCase : Any = patch_norm
__lowerCamelCase : Optional[Any] = layer_norm_eps
__lowerCamelCase : str = initializer_range
__lowerCamelCase : Dict = is_training
__lowerCamelCase : Optional[Any] = scope
__lowerCamelCase : Dict = use_labels
__lowerCamelCase : List[str] = type_sequence_label_size
__lowerCamelCase : Dict = encoder_stride
__lowerCamelCase : Union[str, Any] = out_features
__lowerCamelCase : str = out_indices
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase : List[str] = None
if self.use_labels:
__lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase : List[str] = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self ) -> Optional[int]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
__lowerCamelCase : Dict = MaskFormerSwinModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowerCamelCase : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
__lowerCamelCase : Tuple = MaskFormerSwinBackbone(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__lowerCamelCase : Any = model(SCREAMING_SNAKE_CASE_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase : str = ['stem']
__lowerCamelCase : Optional[Any] = MaskFormerSwinBackbone(config=SCREAMING_SNAKE_CASE_ )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : List[Any] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase : int = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
lowerCamelCase : int = False
lowerCamelCase : int = False
lowerCamelCase : str = False
lowerCamelCase : int = False
lowerCamelCase : Union[str, Any] = False
def lowercase_ ( self ) -> Tuple:
__lowerCamelCase : Optional[Any] = MaskFormerSwinModelTester(self )
__lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def lowercase_ ( self ) -> int:
pass
def lowercase_ ( self ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase_ ( self ) -> Tuple:
return
def lowercase_ ( self ) -> Dict:
__lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*SCREAMING_SNAKE_CASE_ )
@unittest.skip('Swin does not use inputs_embeds' )
def lowercase_ ( self ) -> Optional[int]:
pass
@unittest.skip('Swin does not support feedforward chunking' )
def lowercase_ ( self ) -> Dict:
pass
def lowercase_ ( self ) -> Union[str, Any]:
__lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCamelCase : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def lowercase_ ( self ) -> Optional[int]:
__lowerCamelCase , __lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : List[str] = model_class(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : str = [*signature.parameters.keys()]
__lowerCamelCase : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def lowercase_ ( self ) -> Any:
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def lowercase_ ( self ) -> List[Any]:
pass
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
__lowerCamelCase : Tuple = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
__lowerCamelCase : Optional[int] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
__lowerCamelCase : int = outputs.hidden_states
__lowerCamelCase : Tuple = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# Swin has a different seq_length
__lowerCamelCase : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCamelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowercase_ ( self ) -> Tuple:
__lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowerCamelCase : Dict = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase : Optional[int] = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Any:
__lowerCamelCase , __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : Union[str, Any] = 3
__lowerCamelCase : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowerCamelCase : str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCamelCase : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowerCamelCase : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__lowerCamelCase : str = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase : Tuple = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def lowercase_ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def lowercase_ ( self ) -> Any:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def lowercase_ ( self ) -> Union[str, Any]:
pass
def lowercase_ ( self ) -> Tuple:
__lowerCamelCase , __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t):
            # NaN != NaN, so this boolean mask selects exactly the NaN entries
            t[t != t] = 0
            return t
def check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_={} ):
with torch.no_grad():
__lowerCamelCase : Optional[int] = model(**SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).to_tuple()
def recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if isinstance(SCREAMING_SNAKE_CASE_ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE_ ) , set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE_ ) , atol=1E-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'
f' {torch.isnan(SCREAMING_SNAKE_CASE_ ).any()} and `inf`: {torch.isinf(SCREAMING_SNAKE_CASE_ )}. Dict has'
f' `nan`: {torch.isnan(SCREAMING_SNAKE_CASE_ ).any()} and `inf`: {torch.isinf(SCREAMING_SNAKE_CASE_ )}.'
) , )
recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
__lowerCamelCase : str = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__lowerCamelCase : Optional[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'output_hidden_states': True} )
__lowerCamelCase : Any = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'output_hidden_states': True} )
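

# A stripped-down sketch (not part of the test suite) of the idea behind
# `check_equivalence` above: the dict-style and tuple-style outputs of a model
# must contain pairwise-equal tensors, compared recursively through nested
# containers. The tensors below are toy stand-ins, not real model outputs.
def _demo_output_equivalence() -> None:
    tuple_output = (torch.ones(2, 2), [torch.zeros(3)])
    dict_output = {"last_hidden_state": torch.ones(2, 2), "hidden_states": [torch.zeros(3)]}
    for tuple_value, dict_value in zip(tuple_output, dict_output.values()):
        if isinstance(tuple_value, (list, tuple)):
            for t, d in zip(tuple_value, dict_value):
                assert torch.allclose(t, d)
        else:
            assert torch.allclose(tuple_value, dict_value)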
@require_torch
class UpperCAmelCase_ (unittest.TestCase , _UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowerCamelCase : List[str] = MaskFormerSwinConfig
def lowercase_ ( self ) -> Tuple:
__lowerCamelCase : List[str] = MaskFormerSwinModelTester(self )
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase , __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : Any = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
__lowerCamelCase : Optional[Any] = backbone_class(SCREAMING_SNAKE_CASE_ )
backbone.to(SCREAMING_SNAKE_CASE_ )
backbone.eval()
__lowerCamelCase : int = backbone(**SCREAMING_SNAKE_CASE_ )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , SCREAMING_SNAKE_CASE_ )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__lowerCamelCase : Union[str, Any] = backbone(**SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__lowerCamelCase : Optional[int] = backbone(**SCREAMING_SNAKE_CASE_ , output_attentions=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(outputs.attentions )
| 13 | 1 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
"""simple docstring"""
@staticmethod
def lowerCamelCase__ ( *_snake_case : int , **_snake_case : Optional[Any] ) -> str:
"""simple docstring"""
pass
def hashimage(image):
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask):
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
snake_case = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def lowerCamelCase__ ( self : List[Any] , _snake_case : List[str] , _snake_case : Dict , _snake_case : int ) -> str:
"""simple docstring"""
A_ = MaskGenerationPipeline(model=_snake_case , image_processor=_snake_case )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase__ ( self : int , _snake_case : int , _snake_case : Optional[Any] ) -> Dict:
"""simple docstring"""
pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
def lowerCamelCase__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
pass
@slow
@require_torch
def lowerCamelCase__ ( self : Tuple ) -> int:
"""simple docstring"""
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0_4_4_4},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_2_1},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0_1_6_7},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0_1_3_2},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0_0_5_3},
{"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9_9_6_7},
{"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9_9_3},
{"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9_9_0_9},
{"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9_8_7_9},
{"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9_8_3_4},
{"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9_7_1_6},
{"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9_6_1_2},
{"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9_5_9_9},
{"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9_5_5_2},
{"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9_5_3_2},
{"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9_5_1_6},
{"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9_4_9_9},
{"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9_4_8_3},
{"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9_4_6_4},
{"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9_4_3},
{"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9_4_3},
{"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9_4_0_8},
{"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9_3_3_5},
{"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9_3_2_6},
{"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9_2_6_2},
{"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8_9_9_9},
{"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8_9_8_6},
{"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8_9_8_4},
{"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8_8_7_3},
{"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
def lowerCamelCase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
A_ = "facebook/sam-vit-huge"
A_ = pipeline("mask-generation" , model=_snake_case )
A_ = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
A_ = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(_snake_case ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0_4_4_4},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_2_1_0},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0_1_6_7},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0_1_3_2},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0_0_5_3},
] , )
| 482 |
"""simple docstring"""
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCamelCase_ : Optional[Any] = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
UpperCamelCase_ : Union[str, Any] = direct_transformers_import(PATH_TO_TRANSFORMERS)
UpperCamelCase_ : List[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
UpperCamelCase_ : str = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
UpperCamelCase_ : Dict = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
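

# Worked example (added for clarity): the regex defined above captures both the
# display name and the link of a Markdown checkpoint reference in a docstring.
def _demo_checkpoint_regex() -> None:
    doc = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
    assert _re_checkpoint.findall(doc) == [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]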
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 482 | 1 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class A__ :
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int]=100 , lowerCamelCase__ : str=13 , lowerCamelCase__ : Optional[int]=30 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : int=32 , lowerCamelCase__ : Union[str, Any]=4 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Union[str, Any]=37 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Union[str, Any]=10 , lowerCamelCase__ : str=0.02 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]=[0, 1, 2, 3] , ):
a__ : Dict = parent
a__ : Dict = 100
a__ : Optional[int] = batch_size
a__ : Union[str, Any] = image_size
a__ : Any = patch_size
a__ : Optional[Any] = num_channels
a__ : int = is_training
a__ : List[str] = use_labels
a__ : Optional[Any] = hidden_size
a__ : List[Any] = num_hidden_layers
a__ : str = num_attention_heads
a__ : str = intermediate_size
a__ : int = hidden_act
a__ : List[Any] = hidden_dropout_prob
a__ : Dict = attention_probs_dropout_prob
a__ : Union[str, Any] = type_sequence_label_size
a__ : Optional[Any] = initializer_range
a__ : List[str] = scope
a__ : int = out_indices
a__ : List[str] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a__ : Optional[int] = (image_size // patch_size) ** 2
a__ : Union[str, Any] = num_patches + 1
def _UpperCamelCase( self : int ):
a__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a__ : Optional[Any] = None
a__ : Tuple = None
if self.use_labels:
a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
a__ : Optional[int] = self.get_config()
return config, pixel_values, labels, pixel_labels
def _UpperCamelCase( self : Tuple ):
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def _UpperCamelCase( self : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any ):
a__ : str = BeitModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ):
a__ : int = BeitForMaskedImageModeling(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : List[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _UpperCamelCase( self : str , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any ):
a__ : List[str] = self.type_sequence_label_size
a__ : Optional[Any] = BeitForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
a__ : Optional[Any] = 1
a__ : List[str] = BeitForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a__ : Union[str, Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _UpperCamelCase( self : Any , lowerCamelCase__ : str , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ):
a__ : int = self.num_labels
a__ : List[str] = BeitForSemanticSegmentation(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : Tuple = model(lowerCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class A__ ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
_lowercase = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
_lowercase = (
{
'feature-extraction': BeitModel,
'image-classification': BeitForImageClassification,
'image-segmentation': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_lowercase = False
_lowercase = False
_lowercase = False
def _UpperCamelCase( self : Any ):
a__ : int = BeitModelTester(self )
a__ : Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def _UpperCamelCase( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def _UpperCamelCase( self : str ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _UpperCamelCase( self : Dict ):
pass
def _UpperCamelCase( self : Optional[Any] ):
a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : List[str] = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def _UpperCamelCase( self : str ):
a__, a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : int = model_class(lowerCamelCase__ )
a__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : Optional[int] = [*signature.parameters.keys()]
a__ : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def _UpperCamelCase( self : List[Any] ):
a__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _UpperCamelCase( self : int ):
a__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ )
def _UpperCamelCase( self : List[Any] ):
a__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
def _UpperCamelCase( self : Optional[int] ):
a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] ):
if not self.model_tester.is_training:
return
a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
a__ : str = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling]:
continue
a__ : List[str] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
a__ : Any = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
a__ : Tuple = model(**lowerCamelCase__ ).loss
loss.backward()
def _UpperCamelCase( self : Tuple ):
a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
a__ : List[Any] = False
a__ : List[str] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
a__ : Optional[Any] = model_class(lowerCamelCase__ )
model.gradient_checkpointing_enable()
model.to(lowerCamelCase__ )
model.train()
a__ : Union[str, Any] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
a__ : int = model(**lowerCamelCase__ ).loss
loss.backward()
def _UpperCamelCase( self : List[str] ):
a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
a__ : Dict = _config_zero_init(lowerCamelCase__ )
for model_class in self.all_model_classes:
a__ : str = model_class(config=lowerCamelCase__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def _UpperCamelCase( self : Optional[int] ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Tuple = BeitModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
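

# A minimal, hedged sketch of the preprocess-then-forward pattern the
# integration tests below follow; it reuses a checkpoint name that appears in
# the tests and is illustrative rather than part of the suite.
def _demo_beit_forward() -> int:
    processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
    model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
    inputs = processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # (1, 1000) class logits for this checkpoint
    return logits.argmax(-1).item()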
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCamelCase( self : Optional[int] ):
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def _UpperCamelCase( self : str ):
a__ : int = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(lowerCamelCase__ )
a__ : Optional[Any] = self.default_image_processor
a__ : Dict = prepare_img()
a__ : Optional[int] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).pixel_values.to(lowerCamelCase__ )
# prepare bool_masked_pos
a__ : Optional[Any] = torch.ones((1, 196) , dtype=torch.bool ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : Any = model(pixel_values=lowerCamelCase__ , bool_masked_pos=lowerCamelCase__ )
a__ : Tuple = outputs.logits
# verify the logits
a__ : List[str] = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape , lowerCamelCase__ )
a__ : Optional[int] = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , lowerCamelCase__ , atol=1E-2 ) )
@slow
def _UpperCamelCase( self : Dict ):
a__ : str = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(lowerCamelCase__ )
a__ : int = self.default_image_processor
a__ : List[Any] = prepare_img()
a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : Union[str, Any] = model(**lowerCamelCase__ )
a__ : List[str] = outputs.logits
# verify the logits
a__ : Union[str, Any] = torch.Size((1, 1_000) )
self.assertEqual(logits.shape , lowerCamelCase__ )
a__ : int = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
a__ : Tuple = 281
self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ )
@slow
def _UpperCamelCase( self : Any ):
a__ : Dict = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
lowerCamelCase__ )
a__ : str = self.default_image_processor
a__ : List[str] = prepare_img()
a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : Dict = model(**lowerCamelCase__ )
a__ : List[str] = outputs.logits
# verify the logits
a__ : Optional[int] = torch.Size((1, 21_841) )
self.assertEqual(logits.shape , lowerCamelCase__ )
a__ : Optional[Any] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
a__ : Optional[Any] = 2_396
self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ )
@slow
def _UpperCamelCase( self : int ):
a__ : Optional[Any] = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
a__ : Tuple = model.to(lowerCamelCase__ )
a__ : List[Any] = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ )
a__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
a__ : Union[str, Any] = Image.open(ds[0]["file"] )
a__ : List[Any] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : Optional[Any] = model(**lowerCamelCase__ )
a__ : List[str] = outputs.logits
# verify the logits
a__ : Tuple = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , lowerCamelCase__ )
        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
a__ : Dict = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=lowerCamelCase__ , )
else:
a__ : Dict = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=lowerCamelCase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
@slow
def _UpperCamelCase( self : Tuple ):
a__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
a__ : List[Any] = model.to(lowerCamelCase__ )
a__ : int = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ )
a__ : Optional[int] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
a__ : str = Image.open(ds[0]["file"] )
a__ : str = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : List[Any] = model(**lowerCamelCase__ )
a__ : Any = outputs.logits.detach().cpu()
a__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ , target_sizes=[(500, 300)] )
a__ : Optional[int] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , lowerCamelCase__ )
a__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ )
a__ : Any = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , lowerCamelCase__ )
| 37 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( A__ , unittest.TestCase ):
"""simple docstring"""
_lowercase = CLIPTokenizer
_lowercase = CLIPTokenizerFast
_lowercase = True
_lowercase = {}
_lowercase = False
def _UpperCamelCase( self : List[Any] ):
super().setUp()
# fmt: off
a__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
a__ : Optional[Any] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
a__ : Optional[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
a__ : Optional[Any] = {"unk_token": "<unk>"}
a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCamelCase__ ) )
def _UpperCamelCase( self : Dict , **lowerCamelCase__ : int ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def _UpperCamelCase( self : Union[str, Any] , **lowerCamelCase__ : Optional[int] ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def _UpperCamelCase( self : Dict , lowerCamelCase__ : Optional[Any] ):
a__ : int = "lower newer"
a__ : Optional[int] = "lower newer"
return input_text, output_text
def _UpperCamelCase( self : List[str] ):
a__ : Union[str, Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
a__ : int = "lower newer"
a__ : List[str] = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
a__ : Union[str, Any] = tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
a__ : int = tokens + [tokenizer.unk_token]
a__ : Union[str, Any] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ )
@require_ftfy
def _UpperCamelCase( self : Optional[Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
a__ : List[str] = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
a__ : Any = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
a__ : int = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
a__ : Optional[Any] = tokenizer_s.tokenize(lowerCamelCase__ )
a__ : Dict = tokenizer_r.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
a__ : Optional[Any] = "xa\u0303y" + " " + "x\xe3y"
a__ : Optional[int] = tokenizer_s.tokenize(lowerCamelCase__ )
a__ : int = tokenizer_r.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
# Test that the tokenization is identical on unicode of space type
a__ : str = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
a__ : Any = tokenizer_s.tokenize(lowerCamelCase__ )
a__ : int = tokenizer_r.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
# Test that the tokenization is identical on unicode of line break type
a__ : Union[str, Any] = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
a__ : List[Any] = tokenizer_s.tokenize(lowerCamelCase__ )
a__ : int = tokenizer_r.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
a__ : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
a__ : Tuple = f'''{text_of_1_token} {text_of_1_token}'''
a__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , )
a__ : Union[str, Any] = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
a__ : Optional[Any] = f''' {text}'''
a__ : str = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , )
a__ : Dict = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ) + 1, 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
def _UpperCamelCase( self : int ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(lowerCamelCase__ ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def _UpperCamelCase( self : int ):
super().test_tokenization_python_rust_equals()
def _UpperCamelCase( self : str ):
# CLIP always lower cases letters
pass
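

# Illustration (appended, not part of the test class): with the toy vocab and
# merges written in setUp, BPE first splits a word into characters with an
# end-of-word marker, then applies merges by rank ("l o" -> "lo",
# "e r</w>" -> "er</w>"), which is exactly what test_full_tokenizer asserts.
def _demo_clip_bpe(tokenizer: CLIPTokenizer) -> None:
    # `tokenizer` is assumed to be built from the toy files created in setUp.
    assert tokenizer.tokenize("lower") == ["lo", "w", "er</w>"]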
| 37 | 1 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
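

# Hedged end-to-end sketch (not one of the parametrized tests): write a tiny
# JSON Lines file by hand and load it back, mirroring the four-row fixture the
# assertions above expect.
def _demo_json_roundtrip(tmp_path) -> None:
    path = tmp_path / "data.jsonl"
    rows = [{"col_1": str(i), "col_2": i, "col_3": float(i)} for i in range(4)]
    path.write_text("\n".join(json.dumps(row) for row in rows))
    dataset = JsonDatasetReader(str(path), cache_dir=str(tmp_path / "cache")).read()
    assert dataset.num_rows == 4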
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # the fixture columns are ordered col_3, col_1, col_2; the requested
    # features deliberately use a different order
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _lowerCAmelCase ( __lowerCamelCase:List[str] , __lowerCamelCase:Optional[int] , __lowerCamelCase:Union[str, Any] ):
'''simple docstring'''
__magic_name__ = tmp_path / "cache"
__magic_name__ = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ = JsonDatasetReader({"train": jsonl_path} , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE ).read()
_check_json_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def _lowerCAmelCase ( __lowerCamelCase:Any , __lowerCamelCase:int , __lowerCamelCase:Tuple ):
'''simple docstring'''
__magic_name__ = tmp_path / "cache"
__magic_name__ = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__magic_name__ = features.copy() if features else default_expected_features
__magic_name__ = (
Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ = JsonDatasetReader({"train": jsonl_path} , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_json_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _lowerCAmelCase ( __lowerCamelCase:Any , __lowerCamelCase:Optional[int] , __lowerCamelCase:List[Any] ):
'''simple docstring'''
if split:
__magic_name__ = {split: jsonl_path}
else:
__magic_name__ = "train"
__magic_name__ = {"train": jsonl_path, "test": jsonl_path}
__magic_name__ = tmp_path / "cache"
__magic_name__ = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__magic_name__ = JsonDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_json_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def _lowerCAmelCase ( __lowerCamelCase:List[Any] ):
'''simple docstring'''
return json.load(_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( __lowerCamelCase:int ):
'''simple docstring'''
return [json.loads(_SCREAMING_SNAKE_CASE ) for line in buffer]
class A_ :
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def _snake_case ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple ) -> Optional[int]:
with io.BytesIO() as buffer:
JsonDatasetWriter(__lowerCamelCase , __lowerCamelCase , lines=__lowerCamelCase ).write()
buffer.seek(0 )
__magic_name__ = load_json_function(__lowerCamelCase )
assert isinstance(__lowerCamelCase , __lowerCamelCase )
assert isinstance(exported_content[0] , __lowerCamelCase )
assert len(__lowerCamelCase ) == 1_0
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def _snake_case ( self : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : Dict ) -> List[str]:
with io.BytesIO() as buffer:
JsonDatasetWriter(__lowerCamelCase , __lowerCamelCase , lines=__lowerCamelCase , orient=__lowerCamelCase ).write()
buffer.seek(0 )
__magic_name__ = load_json(__lowerCamelCase )
assert isinstance(__lowerCamelCase , __lowerCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__lowerCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(__lowerCamelCase ) == 1_0
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def _snake_case ( self : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : str ) -> Any:
with io.BytesIO() as buffer:
JsonDatasetWriter(__lowerCamelCase , __lowerCamelCase , lines=__lowerCamelCase , num_proc=2 ).write()
buffer.seek(0 )
__magic_name__ = load_json_function(__lowerCamelCase )
assert isinstance(__lowerCamelCase , __lowerCamelCase )
assert isinstance(exported_content[0] , __lowerCamelCase )
assert len(__lowerCamelCase ) == 1_0
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def _snake_case ( self : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : str ) -> List[str]:
with io.BytesIO() as buffer:
JsonDatasetWriter(__lowerCamelCase , __lowerCamelCase , lines=__lowerCamelCase , orient=__lowerCamelCase , num_proc=2 ).write()
buffer.seek(0 )
__magic_name__ = load_json(__lowerCamelCase )
assert isinstance(__lowerCamelCase , __lowerCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__lowerCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(__lowerCamelCase ) == 1_0
def _snake_case ( self : int , __lowerCamelCase : int ) -> List[str]:
with pytest.raises(__lowerCamelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__lowerCamelCase , __lowerCamelCase , num_proc=0 )
@pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
def _snake_case ( self : str , __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] ) -> Dict:
__magic_name__ = tmp_path_factory.mktemp("data" ) / f'''test.json.{extension}'''
__magic_name__ = str(shared_datadir / f'''test_file.json.{extension}''' )
JsonDatasetWriter(__lowerCamelCase , __lowerCamelCase , compression=__lowerCamelCase ).write()
with fsspec.open(__lowerCamelCase , "rb" , compression="infer" ) as f:
__magic_name__ = f.read()
with fsspec.open(__lowerCamelCase , "rb" , compression="infer" ) as f:
__magic_name__ = f.read()
assert exported_content == original_content
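        # Round-trip sketch (added; hedged — this uses the real `datasets` API that
        # the tests above exercise; the file name is hypothetical):
        #   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
        #   JsonDatasetWriter(ds, "demo.jsonl", lines=True).write()
        #   reloaded = JsonDatasetReader("demo.jsonl").read()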
| 701 |
"""simple docstring"""
def is_power_of_two(number: int) -> bool:
    """Return True if ``number`` has at most one set bit (note: this is also True for 0)."""
    if number < 0:
        raise ValueError("number must not be negative")
    # n & (n - 1) clears the lowest set bit, so the result is 0 iff n is 0 or a power of two.
    return number & (number - 1) == 0
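# Illustrative checks (added; not part of the original module): powers of two
# have a single set bit, so n & (n - 1) == 0 holds for them and fails otherwise.
assert all(is_power_of_two(n) for n in (1, 2, 4, 1024))
assert not any(is_power_of_two(n) for n in (3, 6, 12))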
if __name__ == "__main__":
import doctest
doctest.testmod()
| 468 | 0 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance between two vectors, computed with NumPy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))
def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance between two vectors, computed in pure Python."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
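# Quick sanity check (added for illustration; not part of the original module):
# both variants agree on the classic 3-4-5 right triangle.
assert float(euclidean_distance([0, 0], [3, 4])) == 5.0
assert euclidean_distance_no_np([0, 0], [3, 4]) == 5.0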
if __name__ == "__main__":
    def benchmark() -> None:
        """Time the NumPy and pure-Python implementations on the same call."""
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_00_00 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_00_00 , globals=globals() , ) )
benchmark()
| 484 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = "audio-spectrogram-transformer"
def __init__( self: Optional[Any] , __A: int=7_68 , __A: Optional[Any]=12 , __A: Tuple=12 , __A: Union[str, Any]=30_72 , __A: str="gelu" , __A: str=0.0 , __A: List[Any]=0.0 , __A: List[str]=0.02 , __A: List[str]=1e-12 , __A: Any=16 , __A: Dict=True , __A: Optional[Any]=10 , __A: Union[str, Any]=10 , __A: str=10_24 , __A: Optional[int]=1_28 , **__A: Tuple , ) -> List[Any]:
super().__init__(**__A )
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = initializer_range
_A = layer_norm_eps
_A = patch_size
_A = qkv_bias
_A = frequency_stride
_A = time_stride
_A = max_length
_A = num_mel_bins
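# Usage sketch (added; hedged — assumes the upstream `transformers` name for the
# obfuscated config class above):
#   from transformers import ASTConfig
#   config = ASTConfig(num_mel_bins=128, max_length=1024)
#   config.model_type  # "audio-spectrogram-transformer"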
| 484 | 1 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def lowercase_ ( _A : Dict=None ):
"""simple docstring"""
if subparsers is not None:
lowerCamelCase__ : List[Any] = subparsers.add_parser("env" )
else:
lowerCamelCase__ : Optional[Any] = argparse.ArgumentParser("Accelerate env command" )
parser.add_argument(
"--config_file" , default=_A , help="The config file to use for the default values in the launching script." )
if subparsers is not None:
parser.set_defaults(func=_A )
return parser
def lowercase_ ( _A : Tuple ):
"""simple docstring"""
lowerCamelCase__ : int = torch.__version__
lowerCamelCase__ : Optional[int] = torch.cuda.is_available()
lowerCamelCase__ : Any = is_xpu_available()
lowerCamelCase__ : Dict = is_npu_available()
lowerCamelCase__ : str = "Not found"
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(_A ):
lowerCamelCase__ : Any = load_config_from_file(args.config_file ).to_dict()
lowerCamelCase__ : List[str] = {
"`Accelerate` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Numpy version": np.__version__,
"PyTorch version (GPU?)": F"{pt_version} ({pt_cuda_available})",
"PyTorch XPU available": str(_A ),
"PyTorch NPU available": str(_A ),
"System RAM": F"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
}
if pt_cuda_available:
lowerCamelCase__ : Dict = torch.cuda.get_device_name()
print("\nCopy-and-paste the text below in your GitHub issue\n" )
print("\n".join([F"- {prop}: {val}" for prop, val in info.items()] ) )
print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:" )
lowerCamelCase__ : Optional[int] = (
"\n".join([F"\t- {prop}: {val}" for prop, val in accelerate_config.items()] )
if isinstance(_A , _A )
else F"\t{accelerate_config}"
)
print(_A )
lowerCamelCase__ : Tuple = accelerate_config
return info
def lowercase_ ( ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] = env_command_parser()
lowerCamelCase__ : List[Any] = parser.parse_args()
env_command(_A )
return 0
if __name__ == "__main__":
raise SystemExit(main())
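# Usage sketch (added; hedged): this module backs the real `accelerate env`
# subcommand, e.g.
#   accelerate env --config_file ~/.cache/huggingface/accelerate/default_config.yaml
# which prints the version/platform report assembled above, ready to paste into
# a GitHub issue.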
| 5 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _lowercase :
"""simple docstring"""
A__ = field(
default=lowercase__ , metadata={"help": "Model type selected in the list: " + ", ".join(lowercase__)})
A__ = field(
default=lowercase__ , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."})
A__ = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A__ = field(
default=1_28 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
A__ = field(
default=64 , metadata={
"help": (
"The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length."
)
} , )
A__ = field(
default=30 , metadata={
"help": (
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
)
} , )
A__ = field(
default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"})
A__ = field(
default=lowercase__ , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."})
A__ = field(
default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
A__ = field(
default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
A__ = field(
default=0 , metadata={
"help": (
"language id of input for language-specific xlm models (see"
" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
)
} , )
A__ = field(default=1 , metadata={"help": "multiple threads for converting example to features"})
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = "train"
A__ = "dev"
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = 42
A__ = 42
A__ = 42
A__ = 42
def __init__( self : Optional[int] , __lowerCamelCase : SquadDataTrainingArguments , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Union[str, Split] = Split.train , __lowerCamelCase : Optional[bool] = False , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = "pt" , ):
'''simple docstring'''
lowerCamelCase__ : List[str] = args
lowerCamelCase__ : Tuple = is_language_sensitive
        lowerCamelCase__ : int = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(__lowerCamelCase , __lowerCamelCase ):
try:
lowerCamelCase__ : List[str] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
lowerCamelCase__ : str = mode
# Load data features from cache or dataset file
lowerCamelCase__ : Any = "v2" if args.version_2_with_negative else "v1"
lowerCamelCase__ : List[str] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase__ : List[str] = cached_features_file + ".lock"
with FileLock(__lowerCamelCase ):
if os.path.exists(__lowerCamelCase ) and not args.overwrite_cache:
lowerCamelCase__ : str = time.time()
lowerCamelCase__ : Tuple = torch.load(__lowerCamelCase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase__ : Optional[Any] = self.old_features["features"]
lowerCamelCase__ : Optional[int] = self.old_features.get("dataset" , __lowerCamelCase )
lowerCamelCase__ : Optional[Any] = self.old_features.get("examples" , __lowerCamelCase )
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
" future run" )
else:
if mode == Split.dev:
lowerCamelCase__ : List[Any] = self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase__ : str = self.processor.get_train_examples(args.data_dir )
lowerCamelCase__ , lowerCamelCase__ : Tuple = squad_convert_examples_to_features(
examples=self.examples , tokenizer=__lowerCamelCase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__lowerCamelCase , )
lowerCamelCase__ : int = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , __lowerCamelCase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )
def __len__( self : List[Any] ):
'''simple docstring'''
return len(self.features )
def __getitem__( self : List[str] , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.features[i]
lowerCamelCase__ : Tuple = torch.tensor(feature.input_ids , dtype=torch.long )
lowerCamelCase__ : List[Any] = torch.tensor(feature.attention_mask , dtype=torch.long )
lowerCamelCase__ : Tuple = torch.tensor(feature.token_type_ids , dtype=torch.long )
lowerCamelCase__ : Any = torch.tensor(feature.cls_index , dtype=torch.long )
lowerCamelCase__ : Any = torch.tensor(feature.p_mask , dtype=torch.float )
lowerCamelCase__ : Union[str, Any] = torch.tensor(feature.is_impossible , dtype=torch.float )
lowerCamelCase__ : List[str] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase__ : List[Any] = torch.tensor(feature.start_position , dtype=torch.long )
lowerCamelCase__ : List[Any] = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
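        # Note (added): each item is a plain dict of tensors, so PyTorch's default
        # DataLoader collation can batch it directly; start/end positions are only
        # attached in training mode, matching the Split.train branch above.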
| 5 | 1 |
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class _A ( unittest.TestCase):
def __init__( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
def UpperCAmelCase ( self ):
"""simple docstring"""
return {}
def A_ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = '<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'
SCREAMING_SNAKE_CASE_ : Optional[Any] = '\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '
return [html_string_a, html_string_a]
@require_bsa
class _A ( __magic_name__ , unittest.TestCase):
SCREAMING_SNAKE_CASE : str = MarkupLMFeatureExtractor if is_bsa_available() else None
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = MarkupLMFeatureExtractionTester(self )
@property
def UpperCAmelCase ( self ):
"""simple docstring"""
return self.feature_extract_tester.prepare_feat_extract_dict()
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.feature_extraction_class()
# Test not batched input
SCREAMING_SNAKE_CASE_ : List[Any] = get_html_strings()[0]
SCREAMING_SNAKE_CASE_ : Optional[int] = feature_extractor(_SCREAMING_SNAKE_CASE )
# fmt: off
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
SCREAMING_SNAKE_CASE_ : Dict = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
# fmt: on
self.assertEqual(encoding.nodes , _SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.xpaths , _SCREAMING_SNAKE_CASE )
# Test batched
SCREAMING_SNAKE_CASE_ : Tuple = get_html_strings()
SCREAMING_SNAKE_CASE_ : str = feature_extractor(_SCREAMING_SNAKE_CASE )
# fmt: off
SCREAMING_SNAKE_CASE_ : Union[str, Any] = expected_nodes + [['My First Heading', 'My first paragraph.']]
SCREAMING_SNAKE_CASE_ : Any = expected_xpaths + [['/html/body/h1', '/html/body/p']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , _SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.xpaths , _SCREAMING_SNAKE_CASE )
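        # Usage sketch (added; outputs illustrative): outside of tests the
        # extractor is typically used as
        #   fe = MarkupLMFeatureExtractor()
        #   enc = fe("<html><body><h1>Hi</h1></body></html>")
        #   enc.nodes   # e.g. [["Hi"]]
        #   enc.xpaths  # e.g. [["/html/body/h1"]]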
| 511 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : Optional[Any] = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
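# Design note (added): at import time only the `_import_structure` metadata
# exists; `_LazyModule` resolves a symbol such as `MobileBertModel` to its
# torch-backed module on first attribute access, so importing the package stays
# cheap when the TF or tokenizers extras are not installed.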
| 511 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: Optional[Any]= TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''' )
SCREAMING_SNAKE_CASE__: Union[str, Any]= tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , )  # "J'aime le camembert !" ("I love camembert!")
SCREAMING_SNAKE_CASE__: Union[str, Any]= model(lowerCAmelCase )['''last_hidden_state''']
SCREAMING_SNAKE_CASE__: str= tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , lowerCAmelCase )
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE__: Union[str, Any]= tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 107 |
def circle_sort(collection: list) -> list:
    """Sort ``collection`` in place with the circle sort algorithm and return it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection
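# Illustrative checks (added; not in the original): circle sort is in-place and
# comparison-based, so it should agree with the built-in sorted().
assert circle_sort([5, 2, 9, 1, 5]) == sorted([5, 2, 9, 1, 5])
assert circle_sort([]) == []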
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
| 107 | 1 |
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Rising product u * (u - 1) * ... * (u - p + 1) used by Newton's forward formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
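# Example (added for illustration): ucal(u, 3) expands to u * (u - 1) * (u - 2).
assert ucal(3.0, 3) == 3.0 * 2.0 * 1.0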
def main() -> None:
    """Read sample points interactively and interpolate with forward differences."""
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0
    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
| 246 |
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    """Return True if ``n`` reads the same forwards and backwards."""
    n = str(n)
    return n == n[::-1]
def solution(limit: int = 1_0_0_0_0_0_0) -> int:
    """Sum the numbers below ``limit`` that are palindromic in both base 10 and base 2."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
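# Illustrative check (added): 585 = 0b1001001001 is palindromic in both bases,
# so any solution(limit) with limit > 585 must count it.
assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])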
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 246 | 1 |
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE__ = logging.getLogger()
def lowercase__ ( )-> List[Any]:
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""-f""" )
UpperCamelCase = parser.parse_args()
return args.f
def lowercase__ ( __UpperCamelCase )-> Tuple:
UpperCamelCase = {}
UpperCamelCase = os.path.join(__UpperCamelCase , """all_results.json""" )
if os.path.exists(__UpperCamelCase ):
with open(__UpperCamelCase , """r""" ) as f:
UpperCamelCase = json.load(__UpperCamelCase )
else:
raise ValueError(F"can't find {path}" )
return results
def lowercase__ ( )-> int:
UpperCamelCase = torch.cuda.is_available() and torch_device == """cuda"""
return is_using_cuda and is_apex_available()
SCREAMING_SNAKE_CASE__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class a_ ( lowerCamelCase ):
@classmethod
def A__ ( cls ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = tempfile.mkdtemp()
UpperCamelCase = os.path.join(cls.tmpdir , """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
UpperCamelCase = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def A__ ( cls ) -> Optional[Any]:
"""simple docstring"""
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
UpperCamelCase = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.7_5 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """glue_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
UpperCamelCase = get_results(_SCREAMING_SNAKE_CASE )
self.assertLess(result["""perplexity"""] , 100 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """clm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
UpperCamelCase = get_results(_SCREAMING_SNAKE_CASE )
self.assertLess(result["""perplexity"""] , 42 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """mlm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = 7 if get_gpu_count() > 1 else 2
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
UpperCamelCase = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.7_5 )
self.assertLess(result["""train_loss"""] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """ner_no_trainer""" ) ) )
@unittest.skip(reason="""Fix me @muellerzr""" )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
UpperCamelCase = get_results(_SCREAMING_SNAKE_CASE )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["""eval_f1"""] , 28 )
self.assertGreaterEqual(result["""eval_exact"""] , 28 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """qa_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
UpperCamelCase = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """swag_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
UpperCamelCase = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_rouge1"""] , 10 )
self.assertGreaterEqual(result["""eval_rouge2"""] , 2 )
self.assertGreaterEqual(result["""eval_rougeL"""] , 7 )
self.assertGreaterEqual(result["""eval_rougeLsum"""] , 7 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """summarization_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
UpperCamelCase = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_bleu"""] , 30 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """translation_no_trainer""" ) ) )
@slow
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = logging.StreamHandler(sys.stdout )
logger.addHandler(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split()
run_command(self._launch_args + testargs )
UpperCamelCase = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_overall_accuracy"""] , 0.1_0 )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
UpperCamelCase = get_results(_SCREAMING_SNAKE_CASE )
        # The untrained base model scores about 25%; fine-tuning should reach at least 60%.
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """step_1""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """image_classification_no_trainer""" ) ) )
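# Harness note (added): each test above shells out via
#   accelerate launch --config_file <tmp config> <example script> <flags...>
# and then reads the script's all_results.json through get_results() to assert
# on metrics, so a failure reflects end-to-end example behavior rather than a
# unit-level regression.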
| 35 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class a_ ( lowerCamelCase ):
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """tf_padding""" ) )
self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """depth_multiplier""" ) )
class a_ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=0.2_5 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE="relu6" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=None , ) -> List[str]:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = num_channels
UpperCamelCase = image_size
UpperCamelCase = depth_multiplier
UpperCamelCase = min_depth
UpperCamelCase = tf_padding
UpperCamelCase = int(last_hidden_size * depth_multiplier )
UpperCamelCase = output_stride
UpperCamelCase = hidden_act
UpperCamelCase = classifier_dropout_prob
UpperCamelCase = use_labels
UpperCamelCase = is_training
UpperCamelCase = num_labels
UpperCamelCase = initializer_range
UpperCamelCase = scope
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase = MobileNetVaModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = MobileNetVaForImageClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = config_and_inputs
UpperCamelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
lowercase = (
{"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = MobileNetVaModelTester(self )
UpperCamelCase = MobileNetVaConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
def A__ ( self ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
def A__ ( self ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV1 does not output attentions""" )
def A__ ( self ) -> Dict:
"""simple docstring"""
pass
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> int:
"""simple docstring"""
def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
UpperCamelCase = outputs.hidden_states
UpperCamelCase = 26
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
@slow
def A__ ( self ) -> Dict:
"""simple docstring"""
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = MobileNetVaModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def lowercase__ ( )-> Optional[Any]:
UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
def A__ ( self ) -> Dict:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None
)
@slow
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
UpperCamelCase = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 35 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 391 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
a_ = 'Create a default config file for Accelerate with only a few flags set.'
def write_basic_config(mixed_precision: str = "no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """Create and save a basic cluster config for the local machine, detecting available accelerators."""
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        config["num_processes"] = 1
        config["use_cpu"] = True
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help=(
            "Whether or not to use mixed precision training. "
            "Choose between FP16 and BF16 (bfloat16) training. "
            "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later."
        ),
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser
def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
| 417 | 0 |
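# For context, write_basic_config() is what backs the `accelerate config default`
# subcommand, i.e. on the command line:
#
#     accelerate config default --mixed_precision fp16
#
# A minimal programmatic sketch (the JSON path below is illustrative; by default the
# file lands in the Accelerate cache directory):
from accelerate.utils import write_basic_config

write_basic_config(mixed_precision="fp16", save_location="/tmp/accelerate_default.json")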
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 714
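# A short translation sketch against the classes exported above, assuming the
# public "facebook/mbart-large-en-ro" checkpoint (any MBart checkpoint with
# matching language codes would do):
from transformers import MBartForConditionalGeneration, MBartTokenizer

tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")

batch = tokenizer("UN Chief Says There Is No Plan to Stop Chemical Weapons in Syria", return_tensors="pt")
generated = model.generate(**batch, decoder_start_token_id=tokenizer.lang_code_to_id["ro_RO"])
print(tokenizer.batch_decode(generated, skip_special_tokens=True))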
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    """Read a big-endian uint32 from the byte stream."""
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(_lowerCamelCase , "Please use tf.data to implement this functionality.")
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int) -> Any:
'''simple docstring'''
print("Extracting" , f.name)
with gzip.GzipFile(fileobj=_lowerCamelCase) as bytestream:
__UpperCamelCase : str = _readaa(_lowerCamelCase)
if magic != 2_051:
raise ValueError(
"Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
__UpperCamelCase : List[str] = _readaa(_lowerCamelCase)
__UpperCamelCase : Dict = _readaa(_lowerCamelCase)
__UpperCamelCase : Optional[int] = _readaa(_lowerCamelCase)
__UpperCamelCase : Dict = bytestream.read(rows * cols * num_images)
__UpperCamelCase : Optional[int] = numpy.frombuffer(_lowerCamelCase , dtype=numpy.uinta)
__UpperCamelCase : Dict = data.reshape(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , 1)
return data
@deprecated(_lowerCamelCase , "Please use tf.one_hot on tensors.")
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] , _lowerCamelCase : List[str]) -> str:
'''simple docstring'''
__UpperCamelCase : str = labels_dense.shape[0]
__UpperCamelCase : str = numpy.arange(_lowerCamelCase) * num_classes
__UpperCamelCase : str = numpy.zeros((num_labels, num_classes))
__UpperCamelCase : Tuple = 1
return labels_one_hot
@deprecated(_lowerCamelCase , "Please use tf.data to implement this functionality.")
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int]=False , _lowerCamelCase : Optional[int]=10) -> Dict:
'''simple docstring'''
print("Extracting" , f.name)
with gzip.GzipFile(fileobj=_lowerCamelCase) as bytestream:
__UpperCamelCase : int = _readaa(_lowerCamelCase)
if magic != 2_049:
raise ValueError(
"Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
__UpperCamelCase : Any = _readaa(_lowerCamelCase)
__UpperCamelCase : List[Any] = bytestream.read(_lowerCamelCase)
__UpperCamelCase : Dict = numpy.frombuffer(_lowerCamelCase , dtype=numpy.uinta)
if one_hot:
return _dense_to_one_hot(_lowerCamelCase , _lowerCamelCase)
return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet; `dtype` can be uint8 (raw pixels) or float32 (rescaled to [0, 1])."""
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(_lowerCamelCase , "Please write your own downloading logic.")
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any]) -> Tuple:
'''simple docstring'''
if not gfile.Exists(_lowerCamelCase):
gfile.MakeDirs(_lowerCamelCase)
__UpperCamelCase : Optional[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase)
if not gfile.Exists(_lowerCamelCase):
urllib.request.urlretrieve(_lowerCamelCase , _lowerCamelCase) # noqa: S310
with gfile.GFile(_lowerCamelCase) as f:
__UpperCamelCase : Any = f.size()
print("Successfully downloaded" , _lowerCamelCase , _lowerCamelCase , "bytes.")
return filepath
@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    """Download (if needed) and parse the MNIST files into train/validation/test _DataSets."""
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = f"Validation size should be between 0 and {len(train_images)}. Received: {validation_size}."
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test) | 94 | 0 |
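# A minimal sketch of driving the reader above; the first call downloads ~11 MB of
# gzipped MNIST files into the given directory (the path is illustrative):
datasets = read_data_sets("/tmp/mnist_data", one_hot=True)

images, labels = datasets.train.next_batch(100)  # 100 flattened 784-dim images
print(images.shape, labels.shape)                # (100, 784) (100, 10)
print(datasets.validation.num_examples)          # 5000 by default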
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Construct a configuration for a LUKE model."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 4 |
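# A short sketch of instantiating the configuration and a randomly initialized
# model from it (the entity_emb_size override is an arbitrary example; the
# defaults match the studio-ousia/luke-base checkpoint):
from transformers import LukeConfig, LukeModel

config = LukeConfig(entity_emb_size=128)
model = LukeModel(config)
print(model.config.entity_vocab_size)  # 500000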
def get_set_bits_count(number: int) -> int:
    """
    Count the number of set bits (1s) in a non-negative integer
    using Brian Kernighan's algorithm.

    >>> get_set_bits_count(25)
    3
    >>> get_set_bits_count(0)
    0
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # `number &= number - 1` clears the lowest set bit, so the loop runs once
        # per set bit instead of once per bit position; it won't run 32 times,
        # only "number of 1s" times.
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 137 | 0 |
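# For comparison, CPython has built-in routes to the same count; a quick sketch:
n = 0b101101
assert get_set_bits_count(n) == bin(n).count("1") == 4
# Python 3.10+ also exposes int.bit_count(), the built-in population count.
assert n.bit_count() == 4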
def solution(limit: int = 1000000) -> int:
    """Count how many n below `limit` admit exactly ten representations
    x**2 - y**2 - z**2 == n with x, y, z a decreasing arithmetic progression."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # z > 0 needs a > d, and n > 0 needs a < 4d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F'{solution() = }')
| 717 |
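# For reference, the identity the inner loop relies on (a sketch of the standard
# derivation; y = a is the middle term and d the common difference, so x = a + d
# and z = a - d):
#
#     n = x^2 - y^2 - z^2 = (a + d)^2 - a^2 - (a - d)^2 = 4ad - a^2 = a(4d - a)
#
# Hence every admissible n is a multiple of the divisor a (`first_term`),
# d = (a + n/a) / 4 must be an integer (the `% 4` check), and requiring z > 0 and
# n > 0 gives d < a < 4d (the final comparison).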
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code file; files with too few tokens are skipped."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a key (index, repo_name, path) to the LSH index, extending an existing cluster on a hit."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """MinHash every file, then group near-duplicates with LSH."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Jaccard similarity of the two files' token sets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce a cluster to its 'extremes': elements no two of which exceed the similarity threshold."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    """Drop near-duplicate files from the dataset, keeping one 'extreme' per group of copies."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
| 633 | 0 |
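# A minimal end-to-end sketch over a toy datasets.Dataset (contents illustrative;
# note that files with fewer than MIN_NUM_TOKENS tokens are never indexed, so real
# inputs should be longer than the snippets shown here):
from datasets import Dataset

ds = Dataset.from_dict(
    {
        "content": ["def add(a, b):\n    return a + b"] * 2 + ["print('hello')"],
        "repo_name": ["repo1", "repo2", "repo3"],
        "path": ["a.py", "b.py", "c.py"],
    }
)
filtered, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
print(len(filtered), clusters)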
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
__magic_name__ : List[str] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
__magic_name__ : str = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
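        # Same capture for the expected bounding boxes produced by Tesseract 4.1.1.
        expected_boxes = __magic_name__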
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224)) | 436 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3 | 436 | 1 |
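# A short sketch of the two common uses of the config above: a randomly
# initialized model and a backbone slice exposing only selected stages (the
# overrides are illustrative; use from_pretrained(...) for real weights):
from transformers import ResNetConfig, ResNetModel

config = ResNetConfig(layer_type="bottleneck", out_features=["stage2", "stage4"])
model = ResNetModel(config)
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']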
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 706 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # score as computed by the reference mesh-tensorflow implementation
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 184 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 333 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
__lowerCAmelCase ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the backend normalizer in sync with the requested casing options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 333 | 1 |
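# A quick usage sketch for the fast tokenizer defined above:
from transformers import DistilBertTokenizerFast

tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
enc = tokenizer("Hello world!")
print(enc.input_ids)                                   # starts with [CLS], ends with [SEP]
print(tokenizer.convert_ids_to_tokens(enc.input_ids))  # ['[CLS]', 'hello', 'world', '!', '[SEP]']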
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak the original model's weights to the transformers design.
    """
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--codebook_path''', default=None, type=str, help='''Path to flava codebook checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 721 |
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """Factory function used to instantiate the serving server from provided command line arguments."""
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Expose model information."""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model."""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        """Register this command to argparse so it's available for the transformers CLI."""
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task",
            type=str,
            choices=get_supported_tasks(),
            help="The task to run the pipeline on",
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """Tokenize the provided input and optionally return the corresponding token ids."""
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """Detokenize the provided token ids back to readable text."""
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have an empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
| 336 | 0 |
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
| 573 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''

    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'size'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
| 573 | 1 |
"""simple docstring"""
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    # Are we running in a Google Colab or a Kaggle Kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )

            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")

                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
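# Hedged usage sketch, not part of the original module: this is how the
# launcher above is typically driven from a notebook cell. The body of
# train_fn is a placeholder; the one hard requirement is that the Accelerator
# is created *inside* the launched function, not in a top-level cell.
#
#   def train_fn(learning_rate):
#       from accelerate import Accelerator
#       accelerator = Accelerator()
#       ...  # build model/dataloaders here, then accelerator.prepare(...)
#
#   notebook_launcher(train_fn, args=(1e-4,), num_processes=2)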
| 714 |
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Optional[Any] = 0
@slow
def lowerCamelCase__ ( self : Dict ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(UpperCamelCase_ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(UpperCamelCase_ ) , 0 )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 2_0 )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : int = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
# Check that tokenizer_type ≠ model_type
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def lowerCamelCase__ ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(UpperCamelCase_ , '''vocab.txt''' ) )
lowerCAmelCase : Any = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''bert''' , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(UpperCamelCase_ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(UpperCamelCase_ , '''merges.txt''' ) )
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''gpt2''' , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@require_tokenizers
def lowerCamelCase__ ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(UpperCamelCase_ , '''vocab.txt''' ) )
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''bert''' )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(UpperCamelCase_ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(UpperCamelCase_ , '''merges.txt''' ) )
lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''gpt2''' )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
with pytest.raises(UpperCamelCase_ ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def lowerCamelCase__ ( self : str ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
lowerCAmelCase : Dict = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , UpperCamelCase_ )
else:
self.assertEqual(tokenizer.do_lower_case , UpperCamelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
@require_tokenizers
def lowerCamelCase__ ( self : Optional[int] ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
UpperCamelCase_ , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
lowerCAmelCase : Any = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def lowerCamelCase__ ( self : Tuple ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(UpperCamelCase_ )
@require_tokenizers
def lowerCamelCase__ ( self : Any ):
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=UpperCamelCase_ ) , UpperCamelCase_ )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , UpperCamelCase_ )
@require_tokenizers
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = '''Hello, world. How are you?'''
lowerCAmelCase : Optional[Any] = tokenizer.tokenize(UpperCamelCase_ )
self.assertEqual('''[UNK]''' , tokens[0] )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = tokenizer.tokenize(UpperCamelCase_ )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 1_2 )
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
# Check we can load the tokenizer config of an online model.
lowerCAmelCase : Any = get_tokenizer_config('''bert-base-cased''' )
lowerCAmelCase : Optional[int] = config.pop('''_commit_hash''' , UpperCamelCase_ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(UpperCamelCase_ , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
lowerCAmelCase : Union[str, Any] = get_tokenizer_config(UpperCamelCase_ )
self.assertDictEqual(UpperCamelCase_ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Dict = get_tokenizer_config(UpperCamelCase_ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def lowerCamelCase__ ( self : Optional[int] ):
try:
AutoConfig.register('''custom''' , UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = CustomTokenizer.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowerCamelCase__ ( self : str ):
try:
AutoConfig.register('''custom''' , UpperCamelCase_ )
# Can register in two steps
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
            # We pass through a BertTokenizerFast here because there is no slow-to-fast converter for our new
            # tokenizer, and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase : Dict = BertTokenizerFast.from_pretrained(UpperCamelCase_ )
bert_tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : int = CustomTokenizerFast.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__ ( self : Optional[int] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase_ ):
lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase_ ):
lowerCAmelCase : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def lowerCamelCase__ ( self : Optional[int] ):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False
try:
            AutoConfig.register('''custom''' , CustomConfig )
            AutoTokenizer.register(CustomConfig , slow_tokenizer_class=NewTokenizer )
            AutoTokenizer.register(CustomConfig , fast_tokenizer_class=NewTokenizerFast )
# If remote code is not set, the default is to use local
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
lowerCAmelCase : str = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
lowerCAmelCase : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
lowerCAmelCase : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowerCamelCase__ ( self : str ):
with self.assertRaisesRegex(
UpperCamelCase_ , '''bert-base is not a local folder and is not a valid model identifier''' ):
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''bert-base''' )
def lowerCamelCase__ ( self : int ):
with self.assertRaisesRegex(
UpperCamelCase_ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , revision='''aaaaaa''' )
def lowerCamelCase__ ( self : Optional[int] ):
# Make sure we have cached the tokenizer.
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
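            # Descriptive note: the single HEAD request asserted below is the
            # Hub freshness check; no GET requests happen because every file is
            # served from the local cache populated by the first from_pretrained.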
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 637 | 0 |
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its 0b-prefixed binary string representation."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 57 |
'''simple docstring'''
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    """Yield Fibonacci numbers indefinitely."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term with ``n`` digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
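

# Hedged sanity check: per Project Euler problem 25, the first Fibonacci term
# with three digits is term 12, so solution(3) is expected to return 12.
assert solution(3) == 12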
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 310 | 0 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    """Log commit info about the current repo as a json file in `folder_path`."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """Handle single- and multi-GPU / multi-node initialization."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
logger.info(PREFIX + """Node ID : %i""" % params.node_id )
logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
logger.info(PREFIX + """World size : %i""" % params.world_size )
logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("""Initializing PyTorch distributed""" )
torch.distributed.init_process_group(
init_method="""env://""" , backend="""nccl""" , )
def set_seed(args):
    """Seed all random number generators for reproducibility."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
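        # Hedged note, beyond what the original script does: fully deterministic
        # runs usually also require torch.backends.cudnn.deterministic = True
        # (and benchmark = False), at some speed cost.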
| 440 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    def __post_init__(self):
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
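

# Hedged usage sketch, not part of the original module; the checkpoint name
# and data_dir are placeholders:
#
#   from transformers import AutoTokenizer
#
#   args = GlueDataTrainingArguments(task_name="mrpc", data_dir="/path/to/MRPC")
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#   train_dataset = GlueDataset(args, tokenizer=tokenizer, mode=Split.train)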
| 440 | 1 |
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclidean algorithm: recursive implementation."""
    return abs(y) if x == 0 else greatest_common_divisor(y % x, x)


def gcd_by_iterative(x: int, y: int) -> int:
    """Euclidean algorithm: iterative implementation."""
    while y:  # when y reaches 0, the loop terminates and x holds the GCD
        x, y = y, x % y
    return abs(x)
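

# Hedged sanity checks for the two implementations above.
assert greatest_common_divisor(24, 40) == 8
assert gcd_by_iterative(24, 40) == 8
assert greatest_common_divisor(0, 7) == 7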
def main() -> None:
    """Read two comma-separated integers and print their GCD."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_a = int(nums[0])
        num_b = int(nums[1])
        print(
            f"greatest_common_divisor({num_a}, {num_b}) = "
            f"{greatest_common_divisor(num_a, num_b)}"
        )
        print(f"By iterative gcd({num_a}, {num_b}) = {gcd_by_iterative(num_a, num_b)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
| 250 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase__ ( __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : List[str] = CanineTokenizer
__UpperCAmelCase : int = False
def _UpperCamelCase ( self ):
super().setUp()
lowerCamelCase_ : int = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _UpperCamelCase ( self ):
return CanineTokenizer.from_pretrained("google/canine-s" )
def _UpperCamelCase ( self , **a_ ):
lowerCamelCase_ : List[str] = self.tokenizer_class.from_pretrained(self.tmpdirname , **a_ )
lowerCamelCase_ : Dict = 1024
return tokenizer
@require_torch
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[Any] = self.canine_tokenizer
lowerCamelCase_ : str = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
lowerCamelCase_ : Dict = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
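        # Descriptive note: 57344 (0xE000) and 57345 (0xE001) are CANINE's
        # [CLS] and [SEP] codepoints in Unicode's private-use area; the other
        # ids are plain ord() values and the trailing zeros are padding.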
lowerCamelCase_ : List[Any] = tokenizer(a_ , padding=a_ , return_tensors="pt" )
self.assertIsInstance(a_ , a_ )
lowerCamelCase_ : List[str] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(a_ , a_ )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = self.canine_tokenizer
lowerCamelCase_ : Tuple = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
lowerCamelCase_ : Union[str, Any] = tokenizer(a_ , padding=a_ , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , a_ )
self.assertIn("attention_mask" , a_ )
self.assertIn("token_type_ids" , a_ )
@require_torch
def _UpperCamelCase ( self ):
lowerCamelCase_ : int = self.canine_tokenizer
lowerCamelCase_ : Tuple = [
"What's the weater?",
"It's about 25 degrees.",
]
lowerCamelCase_ : Optional[Any] = tokenizer(
text_target=a_ , max_length=32 , padding="max_length" , truncation=a_ , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def _UpperCamelCase ( self ):
# safety check on max_len default value so we are sure the test works
lowerCamelCase_ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCamelCase_ : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase_ : Optional[int] = tempfile.mkdtemp()
lowerCamelCase_ : Dict = " He is very happy, UNwant\u00E9d,running"
lowerCamelCase_ : Optional[int] = tokenizer.encode(a_ , add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
lowerCamelCase_ : Union[str, Any] = tokenizer.__class__.from_pretrained(a_ )
lowerCamelCase_ : List[Any] = after_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
shutil.rmtree(a_ )
lowerCamelCase_ : List[Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase_ : List[Any] = tempfile.mkdtemp()
lowerCamelCase_ : Tuple = " He is very happy, UNwant\u00E9d,running"
lowerCamelCase_ : Dict = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
lowerCamelCase_ : List[str] = chr(0Xe007 )
additional_special_tokens.append(a_ )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowerCamelCase_ : List[str] = tokenizer.encode(a_ , add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
lowerCamelCase_ : Any = tokenizer.__class__.from_pretrained(a_ )
lowerCamelCase_ : Any = after_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
self.assertIn(a_ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCamelCase_ : int = tokenizer.__class__.from_pretrained(a_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(a_ )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ ,lowerCamelCase_ : str = self.get_clean_sequence(a_ )
# a special token for Canine can be defined as follows:
lowerCamelCase_ : Tuple = 0Xe005
lowerCamelCase_ : Dict = chr(a_ )
tokenizer.add_special_tokens({"cls_token": special_token} )
lowerCamelCase_ : List[str] = tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertEqual(len(a_ ) , 1 )
lowerCamelCase_ : List[Any] = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=a_ )
lowerCamelCase_ : List[Any] = tokenizer.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : Dict = tokenizer.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : Any = tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertEqual(a_ , input_encoded + special_token_id )
lowerCamelCase_ : Optional[int] = tokenizer.decode(a_ , skip_special_tokens=a_ )
self.assertTrue(special_token not in decoded )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ : Optional[int] = chr(0Xe005 )
lowerCamelCase_ : str = chr(0Xe006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=a_ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
lowerCamelCase_ : Tuple = tokenizer.tokenize(a_ )
lowerCamelCase_ : List[Any] = tokenizer.tokenize(a_ )
self.assertEqual(len(a_ ) , 1 )
self.assertEqual(len(a_ ) , 1 )
self.assertEqual(token_a[0] , a_ )
self.assertEqual(token_a[0] , a_ )
@require_tokenizers
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# a special token for Canine can be defined as follows:
lowerCamelCase_ : List[str] = 0Xe006
lowerCamelCase_ : Any = chr(a_ )
lowerCamelCase_ : str = AddedToken(a_ , lstrip=a_ )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(a_ )
tokenizer.from_pretrained(a_ )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
with open(os.path.join(a_ , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
lowerCamelCase_ : List[Any] = json.load(a_ )
with open(os.path.join(a_ , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
lowerCamelCase_ : int = json.load(a_ )
# a special token for Canine can be defined as follows:
lowerCamelCase_ : Any = 0Xe006
lowerCamelCase_ : List[Any] = chr(a_ )
lowerCamelCase_ : Any = [new_token_a]
lowerCamelCase_ : Optional[Any] = [new_token_a]
with open(os.path.join(a_ , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(a_ , a_ )
with open(os.path.join(a_ , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(a_ , a_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase_ : str = tokenizer_class.from_pretrained(a_ , extra_ids=0 )
self.assertIn(a_ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
lowerCamelCase_ : Optional[int] = 0Xe007
lowerCamelCase_ : List[str] = chr(a_ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase_ : int = [AddedToken(a_ , lstrip=a_ )]
lowerCamelCase_ : Dict = tokenizer_class.from_pretrained(
a_ , additional_special_tokens=a_ , extra_ids=0 )
self.assertIn(a_ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ : Union[str, Any] = "hello world"
if self.space_between_special_tokens:
lowerCamelCase_ : int = "[CLS] hello world [SEP]"
else:
lowerCamelCase_ : int = input
lowerCamelCase_ : Optional[Any] = tokenizer.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : Any = tokenizer.decode(a_ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(a_ , [output, output.lower()] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ : Tuple = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
lowerCamelCase_ : Optional[int] = "a"
lowerCamelCase_ : Dict = ord(a_ )
for attr in attributes_list:
setattr(a_ , attr + "_id" , a_ )
self.assertEqual(getattr(a_ , a_ ) , a_ )
self.assertEqual(getattr(a_ , attr + "_id" ) , a_ )
setattr(a_ , attr + "_id" , a_ )
self.assertEqual(getattr(a_ , a_ ) , a_ )
self.assertEqual(getattr(a_ , attr + "_id" ) , a_ )
setattr(a_ , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(a_ , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(a_ , "additional_special_tokens_ids" ) , [] )
lowerCamelCase_ : Optional[int] = 0Xe006
lowerCamelCase_ : List[str] = chr(a_ )
setattr(a_ , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(a_ , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(a_ , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
| 250 | 1 |
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)

        print(f"Model saved: {save_path}")
    @classmethod
    def read_model(cls, model_path):
        # read saved model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        r_w = model_dic.get("rate_weight")
        r_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins
    def sig(self, x):
        return 1 / (1 + np.exp(-1 * x))
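        # Hedged note: np.exp(-x) overflows for large negative x; a stabler
        # variant is np.where(x >= 0, 1 / (1 + np.exp(-x)),
        # np.exp(x) / (1 + np.exp(x))). The original formulation is kept here.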
    def do_round(self, x):
        return round(x, 3)
    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)

        # expanding the data slices to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap
    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled
    def _expand(self, data):
        # expanding three-dimensional data to a one-dimensional list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded
    def _expand_mat(self, data_mat):
        # expanding a matrix to a one-dimensional list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded
    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        # calculate the gradient from the data slice of the pool layer
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all
    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1, pd_conv1_pooled, shape_featuremap1[0], shape_featuremap1[1], self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)

                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
            # print(' ----Teach      ',data_teach)
            # print(' ----BP_output  ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Completed---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse
def lowercase ( self : str , _lowerCamelCase : Optional[int] ):
# model predict
_snake_case = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(_lowercase )) )
for p in range(len(_lowercase ) ):
_snake_case = np.asmatrix(datas_test[p] )
_snake_case , _snake_case = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_snake_case = self.pooling(_lowercase , self.size_poolinga )
_snake_case = self._expand(_lowercase )
_snake_case = data_bp_input
_snake_case = bp_outa * self.vji.T - self.thre_bpa
_snake_case = self.sig(_lowercase )
_snake_case = bp_outa * self.wkj.T - self.thre_bpa
_snake_case = self.sig(_lowercase )
produce_out.extend(bp_outa.getA().tolist() )
_snake_case = [list(map(self.do_round , _lowercase ) ) for each in produce_out]
return np.asarray(_lowercase )
def lowercase ( self : Optional[Any] , _lowerCamelCase : int ):
# return the data of image after convoluting process so we can check it out
_snake_case = np.asmatrix(_lowercase )
_snake_case , _snake_case = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_snake_case = self.pooling(_lowercase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
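# A quick, self-contained check of the average-pooling arithmetic used by
# `pooling` above (NumPy only; this sketch is not part of the original file):
# >>> fm = np.asmatrix(np.arange(16, dtype=float).reshape(4, 4))
# >>> [np.average(fm[i : i + 2, j : j + 2]) for i in (0, 2) for j in (0, 2)]
# [2.5, 4.5, 10.5, 12.5]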
| 703 |
"""simple docstring"""
import os
def _UpperCAmelCase ( __lowerCamelCase : Optional[int] ) -> Union[str, Any]:
_snake_case = len(grid[0] )
_snake_case = len(__lowerCamelCase )
_snake_case = 0
_snake_case = 0
_snake_case = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(__lowerCamelCase ):
for j in range(n_rows - 3 ):
_snake_case = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
_snake_case = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
_snake_case = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
_snake_case = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
_snake_case = max(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if max_product > largest:
_snake_case = max_product
return largest
def _UpperCAmelCase ( ) -> str:
_snake_case = []
with open(os.path.dirname(__lowerCamelCase ) + '''/grid.txt''' ) as file:
for line in file:
grid.append(line.strip('''\n''' ).split(''' ''' ) )
_snake_case = [[int(__lowerCamelCase ) for i in grid[j]] for j in range(len(__lowerCamelCase ) )]
return largest_product(__lowerCamelCase )
if __name__ == "__main__":
print(solution())
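# A minimal sanity check on a hypothetical 4x4 grid (not the Euler input):
# the bottom row gives the largest adjacent product, 13 * 14 * 15 * 16 = 43680.
# >>> largest_product([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
# 43680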
| 430 | 0 |
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        # reference values are pinned per backend: the SDE step draws noise
        # whose realisation differs across mps/cuda/cpu
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
| 505 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 505 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        # iterate over the edges leaving the given vertex
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")

        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")

        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        # 0-1 BFS: 0-weight edges go to the front of the deque, 1-weight
        # edges to the back, so the deque stays sorted by distance
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
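# Example usage (sketch): a 0-weight "shortcut" keeps the distance at 1 even
# though the path 0 -> 1 -> 2 -> 4 uses three edges.
# >>> g = AdjacencyList(5)
# >>> g.add_edge(0, 1, 0); g.add_edge(1, 2, 1); g.add_edge(2, 4, 0)
# >>> g.add_edge(0, 3, 1); g.add_edge(3, 4, 1)
# >>> g.get_shortest_path(0, 4)
# 1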
| 72 |
import math

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}


class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
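# Sanity check for `inputs_to_logits_ratio` under the default strides: the
# feature extractor downsamples by 5 * 2**6 = 320 input samples per output
# frame (20 ms at a 16 kHz sampling rate).
# >>> math.prod((5, 2, 2, 2, 2, 2, 2))
# 320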
| 72 | 1 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
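# Example invocation (all paths and the script filename are hypothetical,
# shown only to illustrate the expected arguments):
# python convert_luke_checkpoint.py \
#     --checkpoint_path ./luke.bin --metadata_path ./metadata.json \
#     --entity_vocab_path ./entity_vocab.tsv \
#     --pytorch_dump_folder_path ./luke-hf --model_size base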
| 388 |
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]):
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text: str):
        # one token per UTF-8 byte of the input string
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    # ByT5Tokenizer has no vocab file to save
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        return ()
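# The byte <-> id mapping above is simply "UTF-8 byte value + number of
# special tokens"; a standalone sketch of the round trip (3 specials, as in
# the class defaults):
# >>> num_special = 3
# >>> ids = [b + num_special for b in "héllo".encode("utf-8")]
# >>> bytes(i - num_special for i in ids).decode("utf-8")
# 'héllo'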
| 388 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}


class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
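# Thanks to `attribute_map`, the canonical config names resolve to the
# Transformer-XL ones (a small illustration, assuming the defaults above):
# >>> TransfoXLConfig().hidden_size        # mapped to d_model
# 1024
# >>> TransfoXLConfig().num_hidden_layers  # mapped to n_layer
# 18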
| 705 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polynomial Regression results
def viz_polymonial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
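# PolynomialFeatures(degree=4) expands a single feature x into
# [1, x, x**2, x**3, x**4], so "polynomial regression" here is ordinary
# linear regression on those powers:
# >>> PolynomialFeatures(degree=4).fit_transform([[2.0]])
# array([[ 1.,  2.,  4.,  8., 16.]])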
| 154 | 0 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}


class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        # default to the CPU execution provider when none is given
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        # an "@" in the model id pins a specific revision, e.g. "repo@main"
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
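# Typical usage (paths and the input name "sample" are hypothetical; they
# depend on the exported ONNX graph):
# model = OnnxRuntimeModel.from_pretrained("./onnx_unet", provider="CPUExecutionProvider")
# outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))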
| 671 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 671 | 1 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils


def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 701 |
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
| 57 | 0 |