import argparse

import torch

from transformers import BertForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extract some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learning"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings: copy the token/position embeddings and their LayerNorm as-is.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    # Transformer layers: keep every other teacher layer (0, 2, 4, 7, 9, 11) and
    # remap the BERT parameter names onto the DistilBERT student layout.
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    # Masked-LM head.
    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
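# A minimal, hedged sketch of consuming the dumped checkpoint: loading it into
# a randomly-initialized DistilBERT student. The model name and checkpoint
# path below are illustrative assumptions, not part of the script above.
import torch

from transformers import DistilBertConfig, DistilBertForMaskedLM

student_config = DistilBertConfig.from_pretrained("distilbert-base-uncased")
student = DistilBertForMaskedLM(student_config)
compressed_sd = torch.load("serialization_dir/tf_bert-base-uncased_0247911.pth")
# strict=False: the extracted dict covers only the transferred tensors
# (e.g. vocab_transform is absent unless --vocab_transform was passed).
student.load_state_dict(compressed_sd, strict=False)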
import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Decode kwargs must be tested with a real checkpoint: the dummy models' tokenizers are not aligned with
        # their models, so `skip_special_tokens=True` would have no effect on them.
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
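# A minimal sketch of the producer/consumer pattern the iterator-streamer
# tests above exercise, usable outside a test: generation runs in a background
# thread while the main thread iterates over decoded text chunks. The tiny
# test checkpoint is reused here only to keep the example small.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("Hello", return_tensors="pt")

streamer = TextIteratorStreamer(tokenizer)
Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer}).start()
for new_text in streamer:  # yields decoded text as soon as tokens are ready
    print(new_text, end="")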
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock

import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset

from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType


class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    """Create a simple DataLoader to use during the test cases."""
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)

    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    """A helper function for verifying the batch sizes coming from a prepared dataloader in each process."""
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state


if __name__ == "__main__":
    main()
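# The script above asserts `num_processes == 2`, so it is meant to be launched
# with two processes; an assumed invocation (script file name illustrative):
#
#   accelerate launch --num_processes 2 test_distributed_data_loop.py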
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """
    Greedy APX-Algorithm for min Vertex Cover
    @input: graph (stored as an adjacency list mapping each vertex to the list
            of its neighbours)
    @example:
    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    # queue used to store nodes and their rank
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so -1*len(v) is used to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    #   (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    """
    An adapter to assist with logging in multiprocess.

    `log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all
    processes or only the main executed one. Default is `main_process_only=True`.
    """

    @staticmethod
    def _should_log(main_process_only):
        "Check if log should be performed"
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """
        Delegates the logger call after checking if we should log.

        Accepts a new kwarg of `main_process_only`, which dictates whether it will be logged across all processes
        or only the main executed one. Default is `True` if not passed.

        Also accepts `in_order`, which if `True` makes the processes log one by one, in order. This is much easier
        to read, but comes at the cost of sometimes needing to wait for the other processes. Default is `False`.
        """
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)

            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    """
    Returns a `logging.Logger` for `name` that can handle multiprocessing.

    If a log should be called on all processes, pass `main_process_only=False`.
    """
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
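# A minimal sketch of using the adapter above; the logger name and messages
# are arbitrary. An `Accelerator()` (or `PartialState()`) must be created
# first, otherwise `log` raises the RuntimeError shown above.
from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()  # initializes the shared PartialState
logger = get_logger(__name__)
logger.info("Printed once, on the main process only")
logger.info("Printed by every process", main_process_only=False)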
import argparse
from collections import OrderedDict
from pathlib import Path

import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F

from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
        ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
    ]
)


def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    return image


@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        idalabel = {0: "table", 1: "table rotated"}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        idalabel = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
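# An assumed invocation for the detection checkpoint (the script file name is
# illustrative; the URL is one of the two `--checkpoint_url` choices above):
#
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection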
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    """
    Simplest and fastest version of image resizing: each destination pixel
    copies the nearest source pixel.
    """

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        # white canvas for the resized output
        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination X coordinate to its source X coordinate."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination Y coordinate to its source Y coordinate."""
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
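# Worked example of the index mapping used in `process` (illustrative
# numbers): resizing a 512x512 source to 800x600 gives ratio_x = 512/800 =
# 0.64 and ratio_y = 512/600 ~= 0.853, so destination pixel (i=300, j=400)
# copies source pixel (get_y(300) = int(256.0) = 256, get_x(400) = int(256.0) = 256).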
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union

import tqdm
from filelock import FileLock

from transformers import (
    BartTokenizer,
    BartTokenizerFast,
    DataProcessor,
    PreTrainedTokenizer,
    RobertaTokenizer,
    RobertaTokenizerFast,
    XLMRobertaTokenizer,
    is_tf_available,
    is_torch_available,
)


logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data. Property names mirror the corresponding model inputs."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None


if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")

                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )

                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list


if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list


class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class. Note that we follow the standard three labels for MNLI, but the HANS evaluation groups
        `contradiction` and `neutral` into `non-entailment` (label 0) while `entailment` is label 1."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith("ex") else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples


def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """
    Loads a data file into a list of ``InputFeatures``.

    Args:
        examples: List of ``InputExamples`` containing the examples.
        label_list: List of labels. Can be obtained from the processor using ``processor.get_labels()``.
        max_length: Maximum example length.
        tokenizer: Instance of a tokenizer that will tokenize the examples.

    Returns:
        A list of task-specific ``InputFeatures`` which can be fed to the model.
    """
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0

        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
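# A minimal sketch of wiring these utilities together, assuming a local
# `hans/` directory containing heuristics_train_set.txt and
# heuristics_evaluation_set.txt (the tokenizer name is illustrative):
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
train_dataset = HansDataset(data_dir="hans", tokenizer=tokenizer, task="hans", max_seq_length=128)
print(len(train_dataset), train_dataset.get_labels())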
from __future__ import absolute_import, division, print_function, unicode_literals

from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
    ROBERTA_INPUTS_DOCSTRING,
    ROBERTA_START_DOCSTRING,
    RobertaEmbeddings,
)

from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy


@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs

        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
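# The builder above backs the packaged "json" loader in `datasets`; a minimal
# usage sketch (file names are illustrative):
from datasets import load_dataset

# One JSON object per line (JSON Lines):
ds = load_dataset("json", data_files={"train": "train.jsonl"}, split="train")
# One big JSON object with the records nested under a key, handled by
# `JsonConfig.field`:
ds_nested = load_dataset("json", data_files="data.json", field="data", split="train")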
import math
import time
from typing import Dict, List, Optional

from torch.utils.data import Dataset

from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
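# A hedged sketch of how this trainer is typically constructed; every name
# below (model, datasets, post-processing and metric functions) is assumed to
# come from the surrounding fine-tuning script, so the call is shown as a
# comment rather than runnable code:
#
#   trainer = QuestionAnsweringSeq2SeqTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate(max_length=64, num_beams=4)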
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    """simple docstring"""

    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class SwinOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-4
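# Quick sanity check (a sketch): with the defaults above, the channel width of
# the last stage is embed_dim * 2 ** (len(depths) - 1) = 96 * 2**3 = 768, which
# the config then exposes as `hidden_size`.
#
#     config = SwinConfig()
#     assert config.hidden_size == 768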
| 4 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    """simple docstring"""

    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_convert_rgb=True):
        """simple docstring"""
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """simple docstring"""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """simple docstring"""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        """simple docstring"""
        pass

    def test_call_pil(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        """simple docstring"""
        pass

    def test_call_pil_four_channels(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
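# To run just these tests (the path is an assumption based on the usual layout
# of the transformers repository, not stated in this file):
#
#     python -m pytest tests/models/chinese_clip/test_image_processing_chinese_clip.py -k "call_pil"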
| 354 |
'''simple docstring'''
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
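# Note (a sketch of the precondition): the scan above assumes `nums` is sorted
# in ascending order, e.g. two_pointer([2, 7, 11, 15], 9) -> [0, 1] because
# 2 + 7 == 9; on unsorted input the pointer moves can skip over the answer.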
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{two_pointer([2, 7, 11, 15], 9) = }')
| 4 | 0 |
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out
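# Worked example: fizz_buzz(1, 7) walks the numbers 1..7 and returns
# "1 2 Fizz 4 Buzz Fizz 7 " -- multiples of 3 become "Fizz", multiples of 5
# become "Buzz", and a multiple of both would become "FizzBuzz".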
if __name__ == "__main__":
import doctest
doctest.testmod()
| 355 |
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # a vertex may take `color` only if no adjacent vertex already has it
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
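if __name__ == "__main__":
    # Usage sketch: 3-colour a triangle with one pendant vertex. The adjacency
    # matrix below is an assumed example input, not part of the original module.
    graph = [
        [0, 1, 1, 1],
        [1, 0, 1, 0],
        [1, 1, 0, 0],
        [1, 0, 0, 0],
    ]
    print(color(graph, 3))  # a valid colouring such as [0, 1, 2, 1]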
| 4 | 0 |
'''simple docstring'''
snake_case__ = {
"""Pillow""": """Pillow<10.0.0""",
"""accelerate""": """accelerate>=0.20.3""",
"""av""": """av==9.2.0""",
"""beautifulsoup4""": """beautifulsoup4""",
"""black""": """black~=23.1""",
"""codecarbon""": """codecarbon==1.2.0""",
"""cookiecutter""": """cookiecutter==1.7.3""",
"""dataclasses""": """dataclasses""",
"""datasets""": """datasets!=2.5.0""",
"""decord""": """decord==0.6.0""",
"""deepspeed""": """deepspeed>=0.9.3""",
"""diffusers""": """diffusers""",
"""dill""": """dill<0.3.5""",
"""evaluate""": """evaluate>=0.2.0""",
"""fairscale""": """fairscale>0.3""",
"""faiss-cpu""": """faiss-cpu""",
"""fastapi""": """fastapi""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1,<=0.7.0""",
"""ftfy""": """ftfy""",
"""fugashi""": """fugashi>=1.0""",
"""GitPython""": """GitPython<3.1.19""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
"""importlib_metadata""": """importlib_metadata""",
"""ipadic""": """ipadic>=1.0.0,<2.0""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
"""jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
"""jieba""": """jieba""",
"""kenlm""": """kenlm""",
"""keras-nlp""": """keras-nlp>=0.3.1""",
"""librosa""": """librosa""",
"""nltk""": """nltk""",
"""natten""": """natten>=0.14.6""",
"""numpy""": """numpy>=1.17""",
"""onnxconverter-common""": """onnxconverter-common""",
"""onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
"""onnxruntime""": """onnxruntime>=1.4.0""",
"""opencv-python""": """opencv-python""",
"""optuna""": """optuna""",
"""optax""": """optax>=0.0.8,<=0.1.4""",
"""packaging""": """packaging>=20.0""",
"""parameterized""": """parameterized""",
"""phonemizer""": """phonemizer""",
"""protobuf""": """protobuf""",
"""psutil""": """psutil""",
"""pyyaml""": """pyyaml>=5.1""",
"""pydantic""": """pydantic<2""",
"""pytest""": """pytest>=7.2.0""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""python""": """python>=3.8.0""",
"""ray[tune]""": """ray[tune]""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
"""rjieba""": """rjieba""",
"""rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
"""ruff""": """ruff>=0.0.241,<=0.0.259""",
"""sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
"""sacremoses""": """sacremoses""",
"""safetensors""": """safetensors>=0.3.1""",
"""sagemaker""": """sagemaker>=2.31.0""",
"""scikit-learn""": """scikit-learn""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""sigopt""": """sigopt""",
"""starlette""": """starlette""",
"""sudachipy""": """sudachipy>=0.6.6""",
"""sudachidict_core""": """sudachidict_core>=20220729""",
"""tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
"""tensorflow""": """tensorflow>=2.6,<2.14""",
"""tensorflow-text""": """tensorflow-text<2.14""",
"""tf2onnx""": """tf2onnx""",
"""timeout-decorator""": """timeout-decorator""",
"""timm""": """timm""",
"""tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
"""torch""": """torch>=1.9,!=1.12.0""",
"""torchaudio""": """torchaudio""",
"""torchvision""": """torchvision""",
"""pyctcdecode""": """pyctcdecode>=0.4.0""",
"""tqdm""": """tqdm>=4.27""",
"""unidic""": """unidic>=1.0.2""",
"""unidic_lite""": """unidic_lite>=1.0.7""",
"""urllib3""": """urllib3<2.0.0""",
"""uvicorn""": """uvicorn""",
}
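# Sketch of how a pin table like this is typically consumed (the helper below
# is hypothetical and not part of the original table): look up the full version
# specifier for each requested package by its short name.
def deps_list(*pkgs: str) -> list[str]:
    return [snake_case__[pkg] for pkg in pkgs]


# deps_list("torch", "numpy") -> ["torch>=1.9,!=1.12.0", "numpy>=1.17"]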
| 356 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save("""out.gif""", save_all=True, append_images=images[1:])
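    # The 3x3 BLINKER oscillator defined above can be animated the same way; it
    # has period 2, so two frames already show the whole cycle (a sketch):
    #
    #     images = generate_images(BLINKER, 2)
    #     images[0].save("blinker.gif", save_all=True, append_images=images[1:])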
| 4 | 0 |
'''simple docstring'''
def xnor_gate(input_a: int, input_b: int) -> int:
    # XNOR outputs 1 exactly when both inputs agree
    return 1 if input_a == input_b else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
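    # Equivalently (a sketch): on 0/1 inputs XNOR is the complement of XOR,
    # i.e. xnor_gate(a, b) == 1 - (a ^ b).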
| 357 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    """simple docstring"""

    def setUp(self):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """simple docstring"""
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        """simple docstring"""
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        """simple docstring"""
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        """simple docstring"""
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
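# End-to-end sketch (the hub checkpoint name and `image` variable are
# assumptions, not used by the tests above):
#
#     processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
#     inputs = processor(text="a photo of a cat", images=image, return_tensors="pt")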
| 4 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case__ = logging.get_logger(__name__)
class UpperCamelCase_(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        """simple docstring"""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        """simple docstring"""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        """simple docstring"""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        """simple docstring"""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
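# Minimal usage sketch (image path and the resulting shape are assumptions):
# with the defaults above, one RGB image is resized so its short side is 256,
# centre cropped to 224x224, rescaled to [0, 1] and normalised.
#
#     from PIL import Image
#     processor = UpperCamelCase_()
#     batch = processor(Image.open("cat.png"), return_tensors="pt")
#     batch["pixel_values"].shape  # -> torch.Size([1, 3, 224, 224])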
| 358 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(self, vocab_file, spm_file, bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", do_upper_case=False, do_lower_case=False, tgt_lang=None, lang_codes=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang, lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        """simple docstring"""
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        """simple docstring"""
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        """simple docstring"""
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """simple docstring"""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """simple docstring"""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        """simple docstring"""
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
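# Usage sketch (checkpoint name taken from the pretrained map above; the text
# round-trip is an illustration, not part of this module):
#
#     tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#     ids = tokenizer("hello world").input_ids
#     tokenizer.decode(ids, skip_special_tokens=True)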
| 4 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
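# Example invocation (the script filename is an assumption):
#
#     python convert_trocr_unilm_to_pytorch.py \
#         --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#         --pytorch_dump_folder_path ./trocr-base-handwritten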
| 359 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    """simple docstring"""

    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    """simple docstring"""

    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    """simple docstring"""

    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    """simple docstring"""

    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    """simple docstring"""

    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    """simple docstring"""

    foo: BasicEnum = "toto"

    def __post_init__(self):
        """simple docstring"""
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    """simple docstring"""

    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        """simple docstring"""
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    """simple docstring"""

    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    """simple docstring"""

    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    """simple docstring"""

    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        """simple docstring"""
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    """simple docstring"""

    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        """simple docstring"""

        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        """simple docstring"""

        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
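# Quick illustration (a sketch, not one of the tests below): HfArgumentParser
# turns the dataclass fields above into argparse options.
#
#     parser = HfArgumentParser(BasicExample)
#     (example,) = parser.parse_args_into_dataclasses(
#         ["--foo", "1", "--bar", "0.5", "--baz", "quux", "--flag", "true"]
#     )
#     # example == BasicExample(foo=1, bar=0.5, baz="quux", flag=True)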
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : List[str] , _lowerCamelCase : argparse.ArgumentParser , _lowerCamelCase : argparse.ArgumentParser ):
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
A_ : Union[str, Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''}
A_ : Optional[Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , _lowerCamelCase ) and yy.get('''choices''' , _lowerCamelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](_lowerCamelCase ) , yy['''type'''](_lowerCamelCase ) )
del xx["type"], yy["type"]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase )
A_ : Optional[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--bar''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--baz''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--flag''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Union[str, Any] = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((A_) ,) : List[str] = parser.parse_args_into_dataclasses(_lowerCamelCase , look_for_args_file=_lowerCamelCase )
self.assertFalse(example.flag )
def _a ( self : Dict ):
"""simple docstring"""
A_ : int = HfArgumentParser(_lowerCamelCase )
A_ : int = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=_lowerCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Dict ):
"""simple docstring"""
A_ : Any = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' )
expected.add_argument('''--baz''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=_lowerCamelCase , dest='''baz''' )
expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase )
A_ : Dict = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_lowerCamelCase )
for dataclass_type in dataclass_types:
A_ : Any = HfArgumentParser(_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : List[Any] = parser.parse_args([] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : Optional[int] = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : Union[str, Any] = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : List[str] = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : List[Any] = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : str = HfArgumentParser(_lowerCamelCase )
A_ : Optional[int] = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : str = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
A_ : List[Any] = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
A_ : int = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
A_ : Dict = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
A_ : Tuple = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
A_ : List[str] = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def _a ( self : Optional[int] ):
"""simple docstring"""
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = "toto"
A_ : List[str] = HfArgumentParser(_lowerCamelCase )
A_ : Tuple = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Tuple = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
A_ : List[str] = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
A_ : int = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
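# List-typed fields map to nargs='+' arguments; list defaults pass through to argparse unchanged.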
def _a ( self : Dict ):
"""simple docstring"""
A_ : int = HfArgumentParser(_lowerCamelCase )
A_ : List[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_lowerCamelCase )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_lowerCamelCase )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[int] = parser.parse_args([] )
self.assertEqual(
_lowerCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
A_ : str = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(_lowerCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def _a ( self : Dict ):
"""simple docstring"""
A_ : Optional[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=_lowerCamelCase , type=_lowerCamelCase )
expected.add_argument('''--bar''' , default=_lowerCamelCase , type=_lowerCamelCase , help='''help message''' )
expected.add_argument('''--baz''' , default=_lowerCamelCase , type=_lowerCamelCase )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_lowerCamelCase )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_lowerCamelCase )
A_ : Tuple = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_lowerCamelCase )
for dataclass_type in dataclass_types:
A_ : int = HfArgumentParser(_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : List[Any] = parser.parse_args([] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , bar=_lowerCamelCase , baz=_lowerCamelCase , ces=[] , des=[] ) )
A_ : Optional[Any] = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(_lowerCamelCase , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[Any] = HfArgumentParser(_lowerCamelCase )
A_ : Dict = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--required_str''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase )
A_ : List[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , )
expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
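# parse_dict() skips argparse entirely and fills the dataclass straight from a mapping; extra keys are rejected unless allow_extra_keys=True.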
def _a ( self : Tuple ):
"""simple docstring"""
A_ : List[Any] = HfArgumentParser(_lowerCamelCase )
A_ : Union[str, Any] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
A_ : Optional[int] = parser.parse_dict(_lowerCamelCase )[0]
A_ : str = BasicExample(**_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Any = HfArgumentParser(_lowerCamelCase )
A_ : List[str] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(_lowerCamelCase , parser.parse_dict , _lowerCamelCase , allow_extra_keys=_lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase )
A_ : List[str] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : Tuple = os.path.join(_lowerCamelCase , '''temp_json''' )
os.mkdir(_lowerCamelCase )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase )
A_ : List[str] = parser.parse_json_file(Path(temp_local_path + '''.json''' ) )[0]
A_ : Optional[Any] = BasicExample(**_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : int ):
"""simple docstring"""
A_ : int = HfArgumentParser(_lowerCamelCase )
A_ : Tuple = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : int = os.path.join(_lowerCamelCase , '''temp_yaml''' )
os.mkdir(_lowerCamelCase )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[Any] = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
A_ : int = BasicExample(**_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = HfArgumentParser(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
| 4 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_time_series_transformer'] = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
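# Standard lazy-import pattern: type checkers see the eager imports below, while at runtime the module is replaced by a _LazyModule that defers the heavy imports.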
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 360 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
snake_case__ = get_tests_dir("""fixtures""")
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : List[str] ):
"""simple docstring"""
response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = {}
response_mock.raise_for_status.side_effect = HTTPError
response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
image_processor = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=response_mock ) as mock_head:
image_processor = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# This check we did call the fake head request
mock_head.assert_called()
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Tuple = ViTImageProcessor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
def _a ( self : Dict ):
"""simple docstring"""
with self.assertRaises(_lowerCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
A_ : Any = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
A_ : Tuple = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' )
self.assertIsNotNone(_lowerCamelCase )
@is_staging_test
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
@classmethod
def setUpClass( cls : Tuple ):
"""simple docstring"""
cls._token = TOKEN
HfFolder.save_token(TOKEN )
@classmethod
def tearDownClass( cls : str ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : Dict = ViTImageProcessor.from_pretrained(_lowerCamelCase )
image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token )
A_ : Optional[int] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_lowerCamelCase , repo_id='''test-image-processor''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
A_ : List[Any] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : int = ViTImageProcessor.from_pretrained(_lowerCamelCase )
image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token )
A_ : List[str] = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_lowerCamelCase , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
A_ : Any = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
def _a ( self : Optional[Any] ):
"""simple docstring"""
CustomImageProcessor.register_for_auto_class()
A_ : Any = CustomImageProcessor.from_pretrained(_lowerCamelCase )
image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , )
A_ : str = AutoImageProcessor.from_pretrained(
f'{USER}/test-dynamic-image-processor' , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
| 4 | 0 |
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
snake_case__ = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor( shape , vocab_size , rng=None ):
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims ):
values.append(rng.randint(0 , vocab_size - 1 ) )
output = np.array(values , dtype=jnp.int32 ).reshape(shape )
return output
def snake_case__ ( shape , rng=None ):
attn_mask = ids_tensor(shape , vocab_size=2 , rng=rng )
# make sure that at least one token is attended to for each batch
attn_mask[:, -1] = 1
return attn_mask
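# e.g. ids_tensor((2, 5), vocab_size=99) -> an int32 array of shape (2, 5) with values drawn uniformly from [0, 98].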
@require_flax
class UpperCamelCase_ :
"""simple docstring"""
model_tester = None
all_generative_model_classes = ()
def _get_input_ids_and_config( self ):
"""simple docstring"""
config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
max_batch_size = 2
sequence_length = inputs['''input_ids'''].shape[-1] // 2
input_ids = inputs['''input_ids'''][:max_batch_size, :sequence_length]
attention_mask = jnp.ones_like(input_ids )
attention_mask = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
max_length = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
config.pad_token_id = config.eos_token_id
return config, input_ids, attention_mask, max_length
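# PT/Flax cross-check: greedy generations from weight-matched PyTorch and Flax models must agree token for token.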
@is_pt_flax_cross_test
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ ,A_ ,A_ ,A_ : Any = self._get_input_ids_and_config()
A_ : Union[str, Any] = False
A_ : Any = max_length
A_ : Optional[Any] = 0
for model_class in self.all_generative_model_classes:
A_ : List[Any] = model_class(_lowerCamelCase )
A_ : Optional[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
A_ : str = getattr(_lowerCamelCase , _lowerCamelCase )
A_ : List[str] = pt_model_class(_lowerCamelCase ).eval()
A_ : List[str] = load_flax_weights_in_pytorch_model(_lowerCamelCase , flax_model.params )
A_ : List[Any] = flax_model.generate(_lowerCamelCase ).sequences
A_ : List[Any] = pt_model.generate(torch.tensor(_lowerCamelCase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
A_ : Any = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def _a ( self : str ):
"""simple docstring"""
A_ ,A_ ,A_ ,A_ : str = self._get_input_ids_and_config()
A_ : Any = False
A_ : Tuple = max_length
for model_class in self.all_generative_model_classes:
A_ : Any = model_class(_lowerCamelCase )
A_ : Union[str, Any] = model.generate(_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
A_ : Union[str, Any] = jit(model.generate )
A_ : Dict = jit_generate(_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Tuple ):
"""simple docstring"""
A_ ,A_ ,A_ ,A_ : Tuple = self._get_input_ids_and_config()
A_ : List[Any] = True
A_ : List[Any] = max_length
for model_class in self.all_generative_model_classes:
A_ : Any = model_class(_lowerCamelCase )
A_ : Optional[int] = model.generate(_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
A_ : Tuple = jit(model.generate )
A_ : List[str] = jit_generate(_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ ,A_ ,A_ ,A_ : List[str] = self._get_input_ids_and_config()
A_ : str = False
A_ : Any = max_length
A_ : Any = 2
for model_class in self.all_generative_model_classes:
A_ : Dict = model_class(_lowerCamelCase )
A_ : Optional[int] = model.generate(_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
A_ : Optional[int] = jit(model.generate )
A_ : Dict = jit_generate(_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ ,A_ ,A_ ,A_ : List[Any] = self._get_input_ids_and_config()
A_ : Dict = False
A_ : str = max_length
A_ : str = 2
A_ : Optional[Any] = 2
for model_class in self.all_generative_model_classes:
A_ : int = model_class(_lowerCamelCase )
A_ : Dict = model.generate(_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ ,A_ ,A_ ,A_ : Union[str, Any] = self._get_input_ids_and_config()
A_ : List[str] = True
A_ : List[str] = max_length
A_ : str = 0.8
A_ : Dict = 10
A_ : Union[str, Any] = 0.3
A_ : int = 1
A_ : Any = 8
A_ : Dict = 9
for model_class in self.all_generative_model_classes:
A_ : Optional[int] = model_class(_lowerCamelCase )
A_ : List[Any] = model.generate(_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
A_ : Dict = jit(model.generate )
A_ : Optional[Any] = jit_generate(_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ ,A_ ,A_ ,A_ : Optional[Any] = self._get_input_ids_and_config()
A_ : int = max_length
A_ : str = 1
A_ : str = 8
A_ : Union[str, Any] = 9
for model_class in self.all_generative_model_classes:
A_ : Optional[int] = model_class(_lowerCamelCase )
A_ : List[Any] = model.generate(_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
A_ : str = jit(model.generate )
A_ : List[Any] = jit_generate(_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : List[str] ):
"""simple docstring"""
A_ ,A_ ,A_ ,A_ : List[str] = self._get_input_ids_and_config()
A_ : Optional[int] = max_length
A_ : Optional[int] = 2
A_ : str = 1
A_ : Any = 8
A_ : str = 9
for model_class in self.all_generative_model_classes:
A_ : List[Any] = model_class(_lowerCamelCase )
A_ : Tuple = model.generate(_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
A_ : List[Any] = jit(model.generate )
A_ : Optional[int] = jit_generate(_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : List[str] ):
"""simple docstring"""
A_ ,A_ ,A_ ,A_ : Dict = self._get_input_ids_and_config()
# pad attention mask on the left
A_ : List[str] = attention_mask.at[(0, 0)].set(0 )
A_ : List[str] = False
A_ : int = max_length
for model_class in self.all_generative_model_classes:
A_ : Tuple = model_class(_lowerCamelCase )
A_ : Tuple = model.generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
A_ : Any = jit(model.generate )
A_ : Union[str, Any] = jit_generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : List[str] ):
"""simple docstring"""
A_ ,A_ ,A_ ,A_ : Optional[int] = self._get_input_ids_and_config()
# pad attention mask on the left
A_ : List[str] = attention_mask.at[(0, 0)].set(0 )
A_ : List[Any] = True
A_ : Any = max_length
for model_class in self.all_generative_model_classes:
A_ : Dict = model_class(_lowerCamelCase )
A_ : int = model.generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
A_ : Optional[Any] = jit(model.generate )
A_ : Tuple = jit_generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : str ):
"""simple docstring"""
A_ ,A_ ,A_ ,A_ : List[str] = self._get_input_ids_and_config()
# pad attention mask on the left
A_ : Dict = attention_mask.at[(0, 0)].set(0 )
A_ : List[Any] = 2
A_ : str = max_length
for model_class in self.all_generative_model_classes:
A_ : Union[str, Any] = model_class(_lowerCamelCase )
A_ : Dict = model.generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
A_ : List[str] = jit(model.generate )
A_ : Optional[Any] = jit_generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] ):
"""simple docstring"""
tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' )
model = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
encoder_input_str = '''Hello world'''
input_ids = tokenizer(encoder_input_str , return_tensors='''np''' ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(ValueError , '''do_samples''' ):
model.generate(input_ids , do_samples=True )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(ValueError , '''foo''' ):
fake_model_kwargs = {'''foo''': '''bar'''}
model.generate(input_ids , **fake_model_kwargs )
| 361 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
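# PipelineTool wrapper around CLIPSeg: encode() builds processor inputs, forward() runs the model, decode() binarizes the logits into a PIL mask.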
class UpperCamelCase_ (PipelineTool ):
"""simple docstring"""
description = (
'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. '
'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
'describing the elements that should be identified in the segmentation mask. The tool returns the mask.'
)
default_checkpoint = 'CIDAS/clipseg-rd64-refined'
name = 'image_segmenter'
model_class = CLIPSegForImageSegmentation
inputs = ['image', 'text']
outputs = ['image']
def __init__( self , *args , **kwargs ):
"""simple docstring"""
requires_backends(self , ['''vision'''] )
super().__init__(*args , **kwargs )
def encode( self , image : "Image" , label : str ):
"""simple docstring"""
return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors='''pt''' )
def forward( self , inputs ):
"""simple docstring"""
with torch.no_grad():
logits = self.model(**inputs ).logits
return logits
def decode( self , outputs ):
"""simple docstring"""
array = outputs.cpu().detach().numpy()
array[array <= 0] = 0
array[array > 0] = 1
return Image.fromarray((array * 255).astype(np.uint8 ) )
| 4 | 0 |
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
snake_case__ = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
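# On-demand re-check of a single pinned dependency, e.g. calling it with "tqdm" raises if the installed tqdm violates the pin recorded in deps.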
def snake_case__ ( pkg , hint=None ):
require_version(deps[pkg] , hint )
| 362 |
'''simple docstring'''
from collections.abc import Sequence
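# Kadane's algorithm: track the best sum of a subarray ending at the current element; the answer is the best such running sum seen anywhere. E.g. [-2, 1, -3, 4, -1, 2, 1, -5, 4] -> 6 (subarray [4, -1, 2, 1]).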
def max_subarray_sum( arr : Sequence[float] , allow_empty_subarrays : bool = False ) -> float:
if not arr:
return 0
max_sum = 0 if allow_empty_subarrays else float('''-inf''' )
curr_sum = 0.0
for num in arr:
curr_sum = max(0 if allow_empty_subarrays else num , curr_sum + num )
max_sum = max(max_sum , curr_sum )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'{max_subarray_sum(nums) = }')
| 4 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
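# End-to-end tests for the CTC-with-LM processor: tokenizer, feature extractor and pyctcdecode beam-search decoder are exercised together.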
@require_pyctcdecode
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def setUp( self : Any ):
"""simple docstring"""
vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
self.add_kwargs_tokens_map = {
"unk_token": "<unk>",
"bos_token": "<s>",
"eos_token": "</s>",
}
feature_extractor_map = {
"feature_size": 1,
"padding_value": 0.0,
"sampling_rate": 16000,
"return_attention_mask": False,
"do_normalize": True,
}
self.tmpdirname = tempfile.mkdtemp()
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
self.feature_extraction_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(vocab_tokens ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(feature_extractor_map ) + '''\n''' )
# load decoder from hub
self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
def get_tokenizer( self , **kwargs_init ):
"""simple docstring"""
kwargs = self.add_kwargs_tokens_map.copy()
kwargs.update(kwargs_init )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def get_feature_extractor( self , **kwargs ):
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **kwargs )
def get_decoder( self , **kwargs ):
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **kwargs )
def tearDown( self : str ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self : Any ):
"""simple docstring"""
A_ : Optional[int] = self.get_tokenizer()
A_ : Optional[int] = self.get_feature_extractor()
A_ : List[Any] = self.get_decoder()
A_ : List[Any] = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
A_ : Dict = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _SCREAMING_SNAKE_CASE )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _SCREAMING_SNAKE_CASE )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _SCREAMING_SNAKE_CASE )
def _a ( self : str ):
"""simple docstring"""
A_ : int = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
A_ : List[str] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def _a ( self : str ):
"""simple docstring"""
A_ : Optional[Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(_SCREAMING_SNAKE_CASE , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=_SCREAMING_SNAKE_CASE , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : Tuple = self.get_feature_extractor()
A_ : List[Any] = self.get_tokenizer()
A_ : str = self.get_decoder()
A_ : int = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE )
A_ : int = floats_list((3, 1000) )
A_ : Union[str, Any] = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors='''np''' )
A_ : List[str] = processor(_SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self : Dict ):
"""simple docstring"""
A_ : Optional[Any] = self.get_feature_extractor()
A_ : Dict = self.get_tokenizer()
A_ : Dict = self.get_decoder()
A_ : Dict = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE )
A_ : int = "This is a test string"
A_ : Dict = processor(text=_SCREAMING_SNAKE_CASE )
A_ : List[Any] = tokenizer(_SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _get_dummy_logits( self , shape=(2, 10, 16) , seed=77 ):
"""simple docstring"""
np.random.seed(seed )
return np.random.rand(*shape )
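# decode() should mirror pyctcdecode's decode_beams(): same best text, same logit score, same LM score.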
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[Any] = self.get_feature_extractor()
A_ : List[str] = self.get_tokenizer()
A_ : Dict = self.get_decoder()
A_ : int = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE )
A_ : str = self._get_dummy_logits(shape=(10, 16) , seed=13 )
A_ : Tuple = processor.decode(_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = decoder.decode_beams(_SCREAMING_SNAKE_CASE )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def _a ( self : List[Any] , pool_context : Optional[Any] ):
"""simple docstring"""
A_ : Tuple = self.get_feature_extractor()
A_ : Any = self.get_tokenizer()
A_ : List[str] = self.get_decoder()
A_ : str = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE )
A_ : int = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
A_ : Any = processor.batch_decode(_SCREAMING_SNAKE_CASE )
else:
with get_context(pool_context ).Pool() as pool:
A_ : Union[str, Any] = processor.batch_decode(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = list(_SCREAMING_SNAKE_CASE )
with get_context('''fork''' ).Pool() as p:
A_ : List[Any] = decoder.decode_beams_batch(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_SCREAMING_SNAKE_CASE , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(_SCREAMING_SNAKE_CASE , decoded_processor.logit_score )
self.assertListEqual(_SCREAMING_SNAKE_CASE , decoded_processor.lm_score )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : int = self.get_feature_extractor()
A_ : Any = self.get_tokenizer()
A_ : List[Any] = self.get_decoder()
A_ : str = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE )
A_ : List[str] = self._get_dummy_logits()
A_ : int = 15
A_ : Optional[int] = -20.0
A_ : Any = -4.0
A_ : Optional[int] = processor.batch_decode(
_SCREAMING_SNAKE_CASE , beam_width=_SCREAMING_SNAKE_CASE , beam_prune_logp=_SCREAMING_SNAKE_CASE , token_min_logp=_SCREAMING_SNAKE_CASE , )
A_ : List[str] = decoded_processor_out.text
A_ : Tuple = list(_SCREAMING_SNAKE_CASE )
with get_context('''fork''' ).Pool() as pool:
A_ : Any = decoder.decode_beams_batch(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , beam_width=_SCREAMING_SNAKE_CASE , beam_prune_logp=_SCREAMING_SNAKE_CASE , token_min_logp=_SCREAMING_SNAKE_CASE , )
A_ : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
A_ : List[str] = [d[0][2] for d in decoded_decoder_out]
A_ : Dict = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , _SCREAMING_SNAKE_CASE )
self.assertTrue(np.array_equal(_SCREAMING_SNAKE_CASE , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.0_54, -18.4_47] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
self.assertTrue(np.array_equal(_SCREAMING_SNAKE_CASE , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.5_54, -13.94_74] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : int = self.get_feature_extractor()
A_ : int = self.get_tokenizer()
A_ : Any = self.get_decoder()
A_ : List[Any] = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = self._get_dummy_logits()
A_ : int = 2.0
A_ : Optional[int] = 5.0
A_ : Dict = -20.0
A_ : Dict = True
A_ : Union[str, Any] = processor.batch_decode(
_SCREAMING_SNAKE_CASE , alpha=_SCREAMING_SNAKE_CASE , beta=_SCREAMING_SNAKE_CASE , unk_score_offset=_SCREAMING_SNAKE_CASE , lm_score_boundary=_SCREAMING_SNAKE_CASE , )
A_ : Dict = decoded_processor_out.text
A_ : int = list(_SCREAMING_SNAKE_CASE )
decoder.reset_params(
alpha=_SCREAMING_SNAKE_CASE , beta=_SCREAMING_SNAKE_CASE , unk_score_offset=_SCREAMING_SNAKE_CASE , lm_score_boundary=_SCREAMING_SNAKE_CASE , )
with get_context('''fork''' ).Pool() as pool:
A_ : str = decoder.decode_beams_batch(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
A_ : int = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , _SCREAMING_SNAKE_CASE )
A_ : int = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _SCREAMING_SNAKE_CASE )
def _a ( self : Any ):
"""simple docstring"""
A_ : List[Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
A_ : str = processor.decoder.model_container[processor.decoder._model_key]
A_ : Union[str, Any] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
A_ : int = os.listdir(_SCREAMING_SNAKE_CASE )
A_ : str = ["alphabet.json", "language_model"]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Union[str, Any] = snapshot_download('''hf-internal-testing/processor_with_lm''' )
A_ : int = WavaVecaProcessorWithLM.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
A_ : int = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
A_ : Any = os.listdir(_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = os.listdir(_SCREAMING_SNAKE_CASE )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : List[Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
A_ : List[str] = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
A_ : int = floats_list((3, 1000) )
A_ : List[str] = processor_wavaveca(_SCREAMING_SNAKE_CASE , return_tensors='''np''' )
A_ : Optional[int] = processor_auto(_SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
A_ : Optional[int] = self._get_dummy_logits()
A_ : Optional[int] = processor_wavaveca.batch_decode(_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = processor_auto.batch_decode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def _a ( self : Any ):
"""simple docstring"""
A_ : int = self.get_feature_extractor()
A_ : Optional[int] = self.get_tokenizer()
A_ : List[Any] = self.get_decoder()
A_ : str = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def get_from_offsets( offsets , key ):
"""simple docstring"""
retrieved_list = [d[key] for d in offsets]
return retrieved_list
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
A_ : Optional[Any] = self._get_dummy_logits()[0]
A_ : int = processor.decode(_SCREAMING_SNAKE_CASE , output_word_offsets=_SCREAMING_SNAKE_CASE )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def _a ( self : str ):
"""simple docstring"""
A_ : Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
A_ : List[str] = self._get_dummy_logits()
A_ : Optional[Any] = processor.batch_decode(_SCREAMING_SNAKE_CASE , output_word_offsets=_SCREAMING_SNAKE_CASE )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_SCREAMING_SNAKE_CASE , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _a ( self : Dict ):
"""simple docstring"""
import torch
A_ : str = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=_SCREAMING_SNAKE_CASE )
A_ : int = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=16000 ) )
A_ : Dict = iter(_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = next(_SCREAMING_SNAKE_CASE )
A_ : Tuple = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
A_ : int = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
A_ : List[str] = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
A_ : int = model(_SCREAMING_SNAKE_CASE ).logits.cpu().numpy()
A_ : Optional[Any] = processor.decode(logits[0] , output_word_offsets=_SCREAMING_SNAKE_CASE )
A_ : List[Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
A_ : List[str] = [
{
"start_time": d["start_offset"] * time_offset,
"end_time": d["end_offset"] * time_offset,
"word": d["word"],
}
for d in output["word_offsets"]
]
A_ : int = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(_SCREAMING_SNAKE_CASE , '''word''' ) ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(''' '''.join(self.get_from_offsets(_SCREAMING_SNAKE_CASE , '''word''' ) ) , output.text )
# output times
A_ : int = torch.tensor(self.get_from_offsets(_SCREAMING_SNAKE_CASE , '''start_time''' ) )
A_ : Optional[Any] = torch.tensor(self.get_from_offsets(_SCREAMING_SNAKE_CASE , '''end_time''' ) )
# fmt: off
A_ : int = torch.tensor([1.41_99, 1.65_99, 2.25_99, 3.0, 3.24, 3.59_99, 3.79_99, 4.09_99, 4.26, 4.94, 5.28, 5.65_99, 5.78, 5.94, 6.32, 6.53_99, 6.65_99] )
A_ : List[str] = torch.tensor([1.53_99, 1.89_99, 2.9, 3.16, 3.53_99, 3.72, 4.01_99, 4.17_99, 4.76, 5.15_99, 5.55_99, 5.69_99, 5.86, 6.19_99, 6.38, 6.61_99, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=0.01 ) )
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=0.01 ) )
| 363 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
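# Speech2Text2 is a decoder-only model (typically paired with a speech encoder such as Wav2Vec2 inside a SpeechEncoderDecoderModel), hence only decoder_* hyper-parameters below.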
class UpperCamelCase_ (PretrainedConfig ):
"""simple docstring"""
model_type = 'speech_to_text_2'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , vocab_size=10000 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=4 , decoder_layerdrop=0.0 , use_cache=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_target_positions=1024 , **kwargs , ):
"""simple docstring"""
self.vocab_size = vocab_size
self.d_model = d_model
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.decoder_layerdrop = decoder_layerdrop
self.use_cache = use_cache
self.num_hidden_layers = decoder_layers
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
self.max_target_positions = max_target_positions
super().__init__(
pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 4 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class UpperCamelCase_ (PretrainedConfig ):
"""simple docstring"""
model_type = 'roformer'
def __init__( self , vocab_size=50000 , embedding_size=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1536 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , rotary_value=False , use_cache=True , **kwargs , ):
"""simple docstring"""
super().__init__(pad_token_id=pad_token_id , **kwargs )
self.vocab_size = vocab_size
self.embedding_size = hidden_size if embedding_size is None else embedding_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.rotary_value = rotary_value
self.use_cache = use_cache
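# ONNX export spec: batch and sequence axes are dynamic for the standard BERT-style inputs; the multiple-choice task adds a choice axis.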
class UpperCamelCase_ (OnnxConfig ):
"""simple docstring"""
@property
def inputs( self ):
"""simple docstring"""
if self.task == "multiple-choice":
dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
else:
dynamic_axis = {0: "batch", 1: "sequence"}
dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 364 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class UpperCamelCase_ (PretrainedConfig ):
"""simple docstring"""
model_type = 'table-transformer'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(backbone_config , dict ):
backbone_model_type = backbone_config.get('''model_type''' )
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config )
# set timm attributes to None
dilation, backbone, use_pretrained_backbone = None, None, None
self.use_timm_backbone = use_timm_backbone
self.backbone_config = backbone_config
self.num_channels = num_channels
self.num_queries = num_queries
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.init_xavier_std = init_xavier_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.num_hidden_layers = encoder_layers
self.auxiliary_loss = auxiliary_loss
self.position_embedding_type = position_embedding_type
self.backbone = backbone
self.use_pretrained_backbone = use_pretrained_backbone
self.dilation = dilation
# Hungarian matcher
self.class_cost = class_cost
self.bbox_cost = bbox_cost
self.giou_cost = giou_cost
# Loss coefficients
self.mask_loss_coefficient = mask_loss_coefficient
self.dice_loss_coefficient = dice_loss_coefficient
self.bbox_loss_coefficient = bbox_loss_coefficient
self.giou_loss_coefficient = giou_loss_coefficient
self.eos_coefficient = eos_coefficient
super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
def num_attention_heads( self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def hidden_size( self ):
"""simple docstring"""
return self.d_model
class UpperCamelCase_ (OnnxConfig ):
"""simple docstring"""
torch_onnx_minimum_version = version.parse('1.11' )
@property
def inputs( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def atol_for_validation( self ):
"""simple docstring"""
return 1E-5
@property
def default_onnx_opset( self ):
"""simple docstring"""
return 12
| 4 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_autoformer"""] = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
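# Same lazy-import pattern as the other model subpackages: eager imports for type checkers, _LazyModule at runtime.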
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 365 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : List[str]=3 , _lowerCamelCase : Any=32 , _lowerCamelCase : Union[str, Any]=3 , _lowerCamelCase : int=10 , _lowerCamelCase : Union[str, Any]=[8, 16, 32, 64] , _lowerCamelCase : Dict=[1, 1, 2, 1] , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Any="relu" , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Dict=["stage2", "stage3", "stage4"] , _lowerCamelCase : Union[str, Any]=[2, 3, 4] , _lowerCamelCase : Tuple=1 , ):
"""simple docstring"""
A_ : List[str] = parent
A_ : List[str] = batch_size
A_ : Union[str, Any] = image_size
A_ : Tuple = num_channels
A_ : Any = embeddings_size
A_ : int = hidden_sizes
A_ : Optional[Any] = depths
A_ : List[Any] = is_training
A_ : Optional[int] = use_labels
A_ : int = hidden_act
A_ : Tuple = num_labels
A_ : Union[str, Any] = scope
A_ : List[Any] = len(_lowerCamelCase )
A_ : Union[str, Any] = out_features
A_ : List[Any] = out_indices
A_ : Dict = num_groups
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Any = ids_tensor([self.batch_size] , self.num_labels )
A_ : Any = self.get_config()
return config, pixel_values, labels
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def _a ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] ):
"""simple docstring"""
A_ : Any = BitModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : int = model(_lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _a ( self : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
A_ : Dict = self.num_labels
A_ : Optional[Any] = BitForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : Any , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : List[Any] ):
"""simple docstring"""
A_ : List[Any] = BitBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : int = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
A_ : Optional[Any] = None
A_ : int = BitBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Optional[int] = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : Union[str, Any] = self.prepare_config_and_inputs()
A_ ,A_ ,A_ : Union[str, Any] = config_and_inputs
A_ : str = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase_ (a__, a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
_lowerCAmelCase = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : List[str] = BitModelTester(self )
A_ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : List[Any] ):
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def _a ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def _a ( self : Any ):
"""simple docstring"""
pass
def _a ( self : List[Any] ):
"""simple docstring"""
A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Dict = model_class(_lowerCamelCase )
A_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : int = [*signature.parameters.keys()]
A_ : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ ,A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : str = model_class(config=_lowerCamelCase )
for name, module in model.named_modules():
if isinstance(_lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
def _a ( self : int ):
"""simple docstring"""
def check_hidden_states_output(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : int ):
A_ : Union[str, Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
A_ : Union[str, Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
A_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ : List[Any] = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Tuple = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A_ : Tuple = layer_type
A_ : Optional[Any] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : List[str] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def _a ( self : Tuple ):
"""simple docstring"""
pass
def _a ( self : str ):
"""simple docstring"""
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def _a ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[Any] = BitModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def snake_case__ ( ) -> Optional[int]:
A_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : List[Any] ):
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def _a ( self : Dict ):
"""simple docstring"""
A_ : Optional[int] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowerCamelCase )
A_ : Union[str, Any] = self.default_image_processor
A_ : Optional[int] = prepare_img()
A_ : int = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
A_ : Union[str, Any] = model(**_lowerCamelCase )
# verify the logits
A_ : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
A_ : Tuple = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) )
@require_torch
class UpperCamelCase_ (a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (BitBackbone,) if is_torch_available() else ()
_lowerCAmelCase = BitConfig
_lowerCAmelCase = False
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Union[str, Any] = BitModelTester(self )
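# Standalone sketch of what create_and_check_backbone exercises above, using the
# real class names (BitConfig/BitBackbone) instead of the obfuscated aliases; one
# feature map is returned per entry in config.out_features.
import torch
from transformers import BitBackbone, BitConfig

config = BitConfig(out_features=["stage2", "stage3", "stage4"])
backbone = BitBackbone(config)
backbone.eval()
with torch.no_grad():
    outputs = backbone(torch.randn(1, 3, 224, 224))
print([tuple(fm.shape) for fm in outputs.feature_maps])  # one map per stage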
| 4 | 0 |
'''simple docstring'''
def circle_sort(collection: list) -> list:
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
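# Non-interactive sanity checks (a sketch; the __main__ block above reads stdin):
assert circle_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert circle_sort([]) == []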
| 366 |
'''simple docstring'''
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 4 | 0 |
'''simple docstring'''
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
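# Note: the two-pointer scan above assumes `nums` is sorted in ascending order. A
# hedged sketch of the one-pass hash-map variant that also handles unsorted input:
def two_sum_hashmap(nums: list[int], target: int) -> list[int]:
    seen: dict[int, int] = {}
    for i, value in enumerate(nums):
        if target - value in seen:
            return [seen[target - value], i]
        seen[value] = i
    return []


assert two_sum_hashmap([11, 2, 15, 7], 9) == [1, 3]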
| 367 |
'''simple docstring'''
from __future__ import annotations
class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
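# Example usage (illustrative first-order coefficients; the class and method names
# come from the reconstruction above): feed an impulse through a recursive smoother.
filt = IIRFilter(1)
filt.set_coefficients([1.0, -0.9], [0.05, 0.05])  # a_coeffs, b_coeffs
print([round(filt.process(x), 4) for x in (1.0, 0.0, 0.0, 0.0)])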
| 4 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'donut-swin'
_lowerCAmelCase = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Union[str, Any] , _lowerCamelCase : Optional[Any]=224 , _lowerCamelCase : Optional[Any]=4 , _lowerCamelCase : Union[str, Any]=3 , _lowerCamelCase : Dict=96 , _lowerCamelCase : Tuple=[2, 2, 6, 2] , _lowerCamelCase : Union[str, Any]=[3, 6, 12, 24] , _lowerCamelCase : Dict=7 , _lowerCamelCase : List[Any]=4.0 , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : List[Any]=0.0 , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : int="gelu" , _lowerCamelCase : List[str]=False , _lowerCamelCase : str=0.02 , _lowerCamelCase : Tuple=1E-5 , **_lowerCamelCase : Optional[int] , ):
"""simple docstring"""
super().__init__(**_a )
A_ : Optional[int] = image_size
A_ : Any = patch_size
A_ : Tuple = num_channels
A_ : Dict = embed_dim
A_ : Dict = depths
A_ : int = len(_a )
A_ : str = num_heads
A_ : Tuple = window_size
A_ : Dict = mlp_ratio
A_ : List[str] = qkv_bias
A_ : Any = hidden_dropout_prob
A_ : str = attention_probs_dropout_prob
A_ : Union[str, Any] = drop_path_rate
A_ : List[Any] = hidden_act
A_ : List[Any] = use_absolute_embeddings
A_ : Union[str, Any] = layer_norm_eps
A_ : Optional[Any] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A_ : Tuple = int(embed_dim * 2 ** (len(_a ) - 1) )
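# Sketch of the derived channel dimension computed above (assumes this is
# transformers' DonutSwinConfig; real names are used instead of the obfuscated ones):
from transformers import DonutSwinConfig

cfg = DonutSwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
assert cfg.hidden_size == 96 * 2 ** (len(cfg.depths) - 1)  # 768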
| 368 |
'''simple docstring'''
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
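# Note on duplicates: insert() overwrites the node value on equality, so repeated
# keys collapse to a single occurrence in the sorted output:
assert tree_sort([3, 1, 3, 2]) == [1, 2, 3]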
| 4 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Dict ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _a ( self : Any ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : Any = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : str = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def _a ( self : int ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : Union[str, Any] = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
A_ : Any = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def _a ( self : Any ):
"""simple docstring"""
A_ : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
A_ : int = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
A_ : List[Any] = DDPMScheduler()
A_ : Tuple = AudioDiffusionPipeline(vqvae=_a , unet=self.dummy_unet , mel=_a , scheduler=_a )
A_ : Optional[int] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ : Union[str, Any] = torch.Generator(device=_a ).manual_seed(42 )
A_ : Dict = pipe(generator=_a , steps=4 )
A_ : int = output.audios[0]
A_ : Union[str, Any] = output.images[0]
A_ : Optional[int] = torch.Generator(device=_a ).manual_seed(42 )
A_ : List[str] = pipe(generator=_a , steps=4 , return_dict=_a )
A_ : int = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
A_ : int = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
A_ : Any = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
A_ : str = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
A_ : int = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
A_ : Dict = DDIMScheduler()
A_ : Optional[Any] = self.dummy_vqvae_and_unet
A_ : Any = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_a , scheduler=_a )
A_ : Union[str, Any] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
np.random.seed(0 )
A_ : Union[str, Any] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
A_ : int = torch.Generator(device=_a ).manual_seed(42 )
A_ : List[str] = pipe(raw_audio=_a , generator=_a , start_step=5 , steps=10 )
A_ : List[str] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
A_ : Optional[int] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
A_ : Union[str, Any] = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
A_ : Union[str, Any] = self.dummy_unet_condition
A_ : str = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=_a , mel=_a , scheduler=_a )
A_ : List[Any] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
np.random.seed(0 )
A_ : Optional[int] = torch.rand((1, 1, 10) )
A_ : Tuple = pipe(generator=_a , encoding=_a )
A_ : Tuple = output.images[0]
A_ : Optional[int] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
A_ : List[str] = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Optional[int] = torch_device
A_ : Optional[Any] = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
A_ : int = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ : str = torch.Generator(device=_a ).manual_seed(42 )
A_ : str = pipe(generator=_a )
A_ : Union[str, Any] = output.audios[0]
A_ : Dict = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
A_ : Union[str, Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
A_ : int = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 369 |
'''simple docstring'''
def heaps(arr: list) -> list:
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
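# Sanity check (non-interactive): Heap's algorithm enumerates each of the n!
# orderings exactly once.
from math import factorial

permutations = heaps([1, 2, 3])
assert len(permutations) == factorial(3)
assert len(set(permutations)) == factorial(3)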
| 4 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase_ (_a, _a, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = CycleDiffusionPipeline
_lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'negative_prompt',
'height',
'width',
'negative_prompt_embeds',
}
_lowerCAmelCase = PipelineTesterMixin.required_optional_params - {'latents'}
_lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
_lowerCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
_lowerCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _a ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
A_ : Any = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , num_train_timesteps=1000 , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , )
torch.manual_seed(0 )
A_ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
A_ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A_ : Union[str, Any] = CLIPTextModel(snake_case_ )
A_ : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
A_ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _a ( self : Optional[int] , _lowerCamelCase : Any , _lowerCamelCase : str=0 ):
"""simple docstring"""
A_ : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
A_ : List[str] = image / 2 + 0.5
if str(snake_case_ ).startswith('''mps''' ):
A_ : Tuple = torch.manual_seed(snake_case_ )
else:
A_ : Union[str, Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
A_ : str = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def _a ( self : Tuple ):
"""simple docstring"""
A_ : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ : Optional[Any] = self.get_dummy_components()
A_ : List[Any] = CycleDiffusionPipeline(**snake_case_ )
A_ : List[str] = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
A_ : Tuple = self.get_dummy_inputs(snake_case_ )
A_ : Any = pipe(**snake_case_ )
A_ : Tuple = output.images
A_ : Optional[int] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A_ : List[Any] = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def _a ( self : List[str] ):
"""simple docstring"""
A_ : List[str] = self.get_dummy_components()
for name, module in components.items():
if hasattr(snake_case_ , '''half''' ):
A_ : Optional[int] = module.half()
A_ : List[Any] = CycleDiffusionPipeline(**snake_case_ )
A_ : Union[str, Any] = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
A_ : int = self.get_dummy_inputs(snake_case_ )
A_ : List[Any] = pipe(**snake_case_ )
A_ : Any = output.images
A_ : Optional[Any] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A_ : List[Any] = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _a ( self : List[Any] ):
"""simple docstring"""
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def _a ( self : int ):
"""simple docstring"""
return super().test_inference_batch_single_identical()
@skip_mps
def _a ( self : Dict ):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _a ( self : int ):
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def _a ( self : Optional[int] ):
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Dict ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : str ):
"""simple docstring"""
A_ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
A_ : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
A_ : Tuple = init_image.resize((512, 512) )
A_ : List[Any] = """CompVis/stable-diffusion-v1-4"""
A_ : Tuple = DDIMScheduler.from_pretrained(snake_case_ , subfolder='''scheduler''' )
A_ : Optional[int] = CycleDiffusionPipeline.from_pretrained(
snake_case_ , scheduler=snake_case_ , safety_checker=snake_case_ , torch_dtype=torch.floataa , revision='''fp16''' )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
A_ : int = """A black colored car"""
A_ : int = """A blue colored car"""
A_ : Dict = torch.manual_seed(0 )
A_ : Dict = pipe(
prompt=snake_case_ , source_prompt=snake_case_ , image=snake_case_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=snake_case_ , output_type='''np''' , )
A_ : Union[str, Any] = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def _a ( self : int ):
"""simple docstring"""
A_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
A_ : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
A_ : Dict = init_image.resize((512, 512) )
A_ : Tuple = """CompVis/stable-diffusion-v1-4"""
A_ : List[Any] = DDIMScheduler.from_pretrained(snake_case_ , subfolder='''scheduler''' )
A_ : str = CycleDiffusionPipeline.from_pretrained(snake_case_ , scheduler=snake_case_ , safety_checker=snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
A_ : Tuple = """A black colored car"""
A_ : List[Any] = """A blue colored car"""
A_ : Optional[Any] = torch.manual_seed(0 )
A_ : Any = pipe(
prompt=snake_case_ , source_prompt=snake_case_ , image=snake_case_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=snake_case_ , output_type='''np''' , )
A_ : List[Any] = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 370 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Dict ):
"""simple docstring"""
A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : Tuple = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : Dict = -1
A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : Any = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase )
A_ : List[str] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
A_ : List[str] = TextStreamer(_lowerCamelCase )
model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
A_ : Dict = cs.out[:-1]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : List[str] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : Dict = -1
A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : Optional[int] = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase )
A_ : str = tokenizer.decode(greedy_ids[0] )
A_ : int = TextIteratorStreamer(_lowerCamelCase )
A_ : List[Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
A_ : List[Any] = Thread(target=model.generate , kwargs=_lowerCamelCase )
thread.start()
A_ : List[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : int ):
"""simple docstring"""
A_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : List[str] = -1
A_ : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : Tuple = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase )
A_ : Tuple = greedy_ids[:, input_ids.shape[1] :]
A_ : Tuple = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
A_ : Any = TextStreamer(_lowerCamelCase , skip_prompt=_lowerCamelCase )
model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
A_ : Any = cs.out[:-1]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
A_ : Tuple = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowerCamelCase )
A_ : List[Any] = -1
A_ : Union[str, Any] = torch.ones((1, 5) , device=_lowerCamelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
A_ : List[Any] = TextStreamer(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
model.generate(_lowerCamelCase , max_new_tokens=1 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
A_ : List[str] = cs.out[:-1] # Remove the final "\n"
A_ : List[Any] = tokenizer(_lowerCamelCase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : Union[str, Any] = -1
A_ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : List[str] = TextIteratorStreamer(_lowerCamelCase , timeout=0.0_01 )
A_ : str = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
A_ : List[str] = Thread(target=model.generate , kwargs=_lowerCamelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_lowerCamelCase ):
A_ : str = ''''''
for new_text in streamer:
streamer_text += new_text
| 4 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""Salesforce/codegen-350M-nl""": """https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json""",
"""Salesforce/codegen-350M-multi""": """https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json""",
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json""",
"""Salesforce/codegen-2B-nl""": """https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json""",
"""Salesforce/codegen-2B-multi""": """https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json""",
"""Salesforce/codegen-2B-mono""": """https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json""",
"""Salesforce/codegen-6B-nl""": """https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json""",
"""Salesforce/codegen-6B-multi""": """https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json""",
"""Salesforce/codegen-6B-mono""": """https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json""",
"""Salesforce/codegen-16B-nl""": """https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json""",
"""Salesforce/codegen-16B-multi""": """https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json""",
"""Salesforce/codegen-16B-mono""": """https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json""",
}
class UpperCamelCase_ (__lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = """codegen"""
_lowerCAmelCase = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : str , _lowerCamelCase : List[Any]=50400 , _lowerCamelCase : List[str]=2048 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : List[Any]=4096 , _lowerCamelCase : Union[str, Any]=28 , _lowerCamelCase : Optional[Any]=16 , _lowerCamelCase : List[str]=64 , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Union[str, Any]="gelu_new" , _lowerCamelCase : Dict=0.0 , _lowerCamelCase : str=0.0 , _lowerCamelCase : List[Any]=0.0 , _lowerCamelCase : str=1E-5 , _lowerCamelCase : List[Any]=0.02 , _lowerCamelCase : int=True , _lowerCamelCase : Optional[Any]=50256 , _lowerCamelCase : Any=50256 , _lowerCamelCase : Optional[Any]=False , **_lowerCamelCase : Dict , ):
"""simple docstring"""
A_ : List[Any] = vocab_size
A_ : Optional[Any] = n_ctx
A_ : Optional[int] = n_positions
A_ : Optional[int] = n_embd
A_ : Any = n_layer
A_ : List[Any] = n_head
A_ : List[str] = n_inner
A_ : Tuple = rotary_dim
A_ : Union[str, Any] = activation_function
A_ : Any = resid_pdrop
A_ : int = embd_pdrop
A_ : Tuple = attn_pdrop
A_ : Dict = layer_norm_epsilon
A_ : int = initializer_range
A_ : Dict = use_cache
A_ : Tuple = bos_token_id
A_ : int = eos_token_id
super().__init__(
bos_token_id=__lowercase , eos_token_id=__lowercase , tie_word_embeddings=__lowercase , **__lowercase )
class UpperCamelCase_ (__lowerCamelCase ):
"""simple docstring"""
def __init__( self : List[Any] , _lowerCamelCase : PretrainedConfig , _lowerCamelCase : str = "default" , _lowerCamelCase : List[PatchingSpec] = None , _lowerCamelCase : bool = False , ):
"""simple docstring"""
super().__init__(__lowercase , task=__lowercase , patching_specs=__lowercase , use_past=__lowercase )
if not getattr(self._config , '''pad_token_id''' , __lowercase ):
# TODO: how to do that better?
A_ : Dict = 0
@property
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : Optional[Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(__lowercase , direction='''inputs''' )
A_ : Union[str, Any] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
A_ : Dict = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def _a ( self : int ):
"""simple docstring"""
return self._config.n_layer
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return self._config.n_head
def _a ( self : Optional[Any] , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : int = -1 , _lowerCamelCase : int = -1 , _lowerCamelCase : bool = False , _lowerCamelCase : Optional[TensorType] = None , ):
"""simple docstring"""
A_ : Optional[int] = super(__lowercase , self ).generate_dummy_inputs(
__lowercase , batch_size=__lowercase , seq_length=__lowercase , is_pair=__lowercase , framework=__lowercase )
# We need to order the input in the way they appears in the forward()
A_ : List[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
A_ : str = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
A_ : Optional[Any] = seqlen + 2
A_ : Tuple = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
A_ : Any = [
(torch.zeros(__lowercase ), torch.zeros(__lowercase )) for _ in range(self.num_layers )
]
A_ : List[Any] = common_inputs['''attention_mask''']
if self.use_past:
A_ : List[Any] = ordered_inputs['''attention_mask'''].dtype
A_ : int = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__lowercase , __lowercase , dtype=__lowercase )] , dim=1 )
return ordered_inputs
@property
def _a ( self : Optional[int] ):
"""simple docstring"""
return 13
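# Standalone sketch of the past-aware attention-mask step performed above: ones for
# the cached positions are concatenated after the current mask (illustrative sizes).
import torch

attention_mask = torch.ones(2, 7)  # (batch, seqlen)
past_key_values_length = 7 + 2     # seqlen + 2, as in the dummy-input builder
attention_mask = torch.cat(
    [attention_mask, torch.ones(2, past_key_values_length, dtype=attention_mask.dtype)],
    dim=1,
)
print(attention_mask.shape)  # torch.Size([2, 16])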
| 371 |
'''simple docstring'''
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    queue: list[list] = []
    # for each node and its adjacency list, push the node and its rank onto the queue
    # heapq implements a min priority queue, so -1 * len(value) emulates a max queue
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if the vertex has no adjacent nodes left, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
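# Quick verification on the demo graph: every edge must touch a chosen vertex.
# (The function mutates the adjacency lists it is given, hence the fresh copy here.)
demo = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
edges = [(u, v) for u, neighbors in demo.items() for v in neighbors]
cover = greedy_min_vertex_cover(demo)
assert all(u in cover or v in cover for u, v in edges)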
| 4 | 0 |
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new

ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
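# Usage sketch for the lookup above:
import tensorflow as tf

activation = get_tf_activation("gelu_fast")
print(activation(tf.constant([-1.0, 0.0, 1.0])).numpy())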
| 350 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
snake_case__ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ) -> Optional[Any]:
A_ : Tuple = state_dict.pop(lowerCamelCase__ )
A_ : Optional[Any] = val
def snake_case__ ( lowerCamelCase__ : Dict ) -> Any:
A_ : int = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
A_ : int = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
A_ : List[str] = value
else:
A_ : Optional[int] = value
return new_state_dict
def snake_case__ ( lowerCamelCase__ : Union[str, Any] ) -> Optional[Any]:
A_ : Any = ''''''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
A_ : Tuple = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
A_ : Dict = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
A_ : str = in_proj_weight[:2_5_6, :]
A_ : Optional[Any] = in_proj_bias[:2_5_6]
A_ : Dict = in_proj_weight[2_5_6:5_1_2, :]
A_ : Tuple = in_proj_bias[2_5_6:5_1_2]
A_ : Tuple = in_proj_weight[-2_5_6:, :]
A_ : Optional[int] = in_proj_bias[-2_5_6:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
A_ : Union[str, Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
A_ : Dict = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
A_ : List[str] = in_proj_weight[:2_5_6, :]
A_ : int = in_proj_bias[:2_5_6]
A_ : Any = in_proj_weight[2_5_6:5_1_2, :]
A_ : List[str] = in_proj_bias[2_5_6:5_1_2]
A_ : Union[str, Any] = in_proj_weight[-2_5_6:, :]
A_ : Optional[Any] = in_proj_bias[-2_5_6:]
# read in weights + bias of input projection layer of cross-attention
A_ : Tuple = state_dict.pop(
f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
A_ : Optional[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
A_ : Dict = in_proj_weight_cross_attn[:2_5_6, :]
A_ : Tuple = in_proj_bias_cross_attn[:2_5_6]
A_ : int = in_proj_weight_cross_attn[2_5_6:5_1_2, :]
A_ : List[str] = in_proj_bias_cross_attn[2_5_6:5_1_2]
A_ : Any = in_proj_weight_cross_attn[-2_5_6:, :]
A_ : Any = in_proj_bias_cross_attn[-2_5_6:]
def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ) -> Dict:
A_ ,A_ : int = image.size
A_ : Tuple = max(lowerCamelCase__ , lowerCamelCase__ )
A_ : Optional[Any] = 8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0
A_ : Union[str, Any] = target_max_size / current_max_size
A_ : Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def snake_case__ ( lowerCamelCase__ : Tuple ) -> str:
A_ : Any = F.to_tensor(lowerCamelCase__ )
A_ : Optional[Any] = F.normalize(lowerCamelCase__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int ) -> str:
logger.info('''Converting model...''' )
# load original state dict
A_ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location='''cpu''' )
# rename keys
for src, dest in rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
A_ : str = rename_backbone_keys(lowerCamelCase__ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowerCamelCase__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
A_ : List[Any] = '''model.'''
for key in state_dict.copy().keys():
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
A_ : List[Any] = state_dict.pop(lowerCamelCase__ )
A_ : str = val
# create HuggingFace model and load state dict
A_ : Union[str, Any] = TableTransformerConfig(
backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
A_ : Dict = 1_5
A_ : Dict = 2
A_ : int = {0: '''table''', 1: '''table rotated'''}
A_ : List[str] = idalabel
A_ : Optional[int] = {v: k for k, v in idalabel.items()}
else:
A_ : Union[str, Any] = 1_2_5
A_ : Optional[Any] = 6
A_ : Optional[Any] = {
0: '''table''',
1: '''table column''',
2: '''table row''',
3: '''table column header''',
4: '''table projected row header''',
5: '''table spanning cell''',
}
A_ : int = idalabel
A_ : Tuple = {v: k for k, v in idalabel.items()}
A_ : Optional[Any] = DetrImageProcessor(
format='''coco_detection''' , max_size=8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0 )
A_ : int = TableTransformerForObjectDetection(lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
model.eval()
# verify our conversion
A_ : Optional[int] = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
A_ : Union[str, Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=lowerCamelCase__ )
A_ : Tuple = Image.open(lowerCamelCase__ ).convert('''RGB''' )
A_ : int = normalize(resize(lowerCamelCase__ , lowerCamelCase__ ) ).unsqueeze(0 )
A_ : str = model(lowerCamelCase__ )
if "detection" in checkpoint_url:
A_ : str = (1, 1_5, 3)
A_ : int = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
A_ : Tuple = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
A_ : Optional[int] = (1, 1_2_5, 7)
A_ : Dict = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
A_ : Any = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
# Push model to HF hub
logger.info('''Pushing model to the hub...''' )
A_ : List[Any] = (
'''microsoft/table-transformer-detection'''
if '''detection''' in checkpoint_url
else '''microsoft/table-transformer-structure-recognition'''
)
model.push_to_hub(lowerCamelCase__ )
image_processor.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
snake_case__ = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
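# A minimal direct-call sketch, equivalent to the CLI invocation above (the
# output folder name is hypothetical):
# convert_table_transformer_checkpoint(
#     "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
#     "./table-transformer-detection",
#     False,
# )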
| 4 | 0 |
'''simple docstring'''
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class UpperCamelCase_ (lowerCAmelCase_ ):
"""simple docstring"""
def __init__( self : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any] = None , _lowerCamelCase : List[str] = None , _lowerCamelCase : int = None , _lowerCamelCase : Optional[int] = False , _lowerCamelCase : int = False , _lowerCamelCase : List[str] = None , **_lowerCamelCase : Union[str, Any] , ):
"""simple docstring"""
super().__init__(
__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE , streaming=__SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
A_ : Optional[int] = path_or_paths if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else {self.split: path_or_paths}
A_ : Dict = Text(
cache_dir=__SCREAMING_SNAKE_CASE , data_files=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
def _a ( self : List[str] ):
"""simple docstring"""
if self.streaming:
A_ : Optional[int] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A_ : Union[str, Any] = None
A_ : List[Any] = None
A_ : Optional[Any] = None
A_ : Optional[int] = None
self.builder.download_and_prepare(
download_config=__SCREAMING_SNAKE_CASE , download_mode=__SCREAMING_SNAKE_CASE , verification_mode=__SCREAMING_SNAKE_CASE , base_path=__SCREAMING_SNAKE_CASE , num_proc=self.num_proc , )
A_ : Optional[Any] = self.builder.as_dataset(
split=self.split , verification_mode=__SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory )
return dataset
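# This reader wraps the packaged ``Text`` builder used above; a minimal usage
# sketch (assuming a local file data.txt), equivalent to what it does internally:
#     from datasets import load_dataset
#     ds = load_dataset("text", data_files="data.txt", split="train")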
| 351 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
snake_case__ = logging.getLogger(__name__)
@dataclass(frozen=a__ )
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
@dataclass(frozen=a__ )
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 42
def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : List[Any]=False , _lowerCamelCase : bool = False , ):
"""simple docstring"""
A_ : Optional[int] = hans_processors[task]()
A_ : int = os.path.join(
_lowerCamelCase , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(_lowerCamelCase ) , _lowerCamelCase , ) , )
A_ : Dict = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A_ ,A_ : List[str] = label_list[2], label_list[1]
A_ : Optional[int] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A_ : str = cached_features_file + '''.lock'''
with FileLock(_lowerCamelCase ):
if os.path.exists(_lowerCamelCase ) and not overwrite_cache:
logger.info(f'Loading features from cached file {cached_features_file}' )
A_ : List[str] = torch.load(_lowerCamelCase )
else:
logger.info(f'Creating features from dataset file at {data_dir}' )
A_ : Optional[int] = (
processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase )
)
logger.info('''Training examples: %s''' , len(_lowerCamelCase ) )
A_ : Optional[int] = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
logger.info('''Saving features into cached file %s''' , _lowerCamelCase )
torch.save(self.features , _lowerCamelCase )
def __len__( self : List[str] ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : List[str] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
return self.features[i]
def _a ( self : str ):
"""simple docstring"""
return self.label_list
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = 128 , _lowerCamelCase : Dict=False , _lowerCamelCase : bool = False , ):
"""simple docstring"""
A_ : Optional[int] = hans_processors[task]()
A_ : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A_ ,A_ : Union[str, Any] = label_list[2], label_list[1]
A_ : Tuple = label_list
A_ : Optional[int] = processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase )
A_ : Tuple = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 10000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(_lowerCamelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
A_ : List[Any] = tf.data.Dataset.from_generator(
_lowerCamelCase , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _a ( self : Any ):
"""simple docstring"""
return self.dataset
def __len__( self : Dict ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : Optional[int] , _lowerCamelCase : List[str] ):
"""simple docstring"""
return self.features[i]
def _a ( self : Tuple ):
"""simple docstring"""
return self.label_list
class UpperCamelCase_ (a__ ):
"""simple docstring"""
def _a ( self : List[str] , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_train_set.txt''' ) ) , '''train''' )
def _a ( self : List[str] , _lowerCamelCase : Tuple ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def _a ( self : Any ):
"""simple docstring"""
return ["contradiction", "entailment", "neutral"]
def _a ( self : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any ):
"""simple docstring"""
A_ : Tuple = []
for i, line in enumerate(_lowerCamelCase ):
if i == 0:
continue
A_ : str = '''%s-%s''' % (set_type, line[0])
A_ : Optional[Any] = line[5]
A_ : Union[str, Any] = line[6]
A_ : List[str] = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
A_ : str = line[0]
examples.append(InputExample(guid=_lowerCamelCase , text_a=_lowerCamelCase , text_b=_lowerCamelCase , label=_lowerCamelCase , pairID=_lowerCamelCase ) )
return examples
def snake_case__ ( lowerCamelCase__ : List[InputExample] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : PreTrainedTokenizer , ) -> int:
A_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase__ )}
A_ : Optional[Any] = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase__ ) , desc='''convert examples to features''' ):
if ex_index % 1_0_0_0_0 == 0:
logger.info('''Writing example %d''' % (ex_index) )
A_ : Optional[int] = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowerCamelCase__ , max_length=lowerCamelCase__ , padding='''max_length''' , truncation=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , )
A_ : List[str] = label_map[example.label] if example.label in label_map else 0
A_ : Tuple = int(example.pairID )
features.append(InputFeatures(**lowerCamelCase__ , label=lowerCamelCase__ , pairID=lowerCamelCase__ ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f'guid: {example}' )
logger.info(f'features: {features[i]}' )
return features
snake_case__ = {
"""hans""": 3,
}
snake_case__ = {
"""hans""": HansProcessor,
}
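# A minimal usage sketch (hypothetical data path and tokenizer), following the
# call sites above:
#     processor = HansProcessor()
#     examples = processor.get_dev_examples("/path/to/hans")
#     features = hans_convert_examples_to_features(
#         examples, processor.get_labels(), 128, tokenizer
#     )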
| 4 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
snake_case__ = {
"""configuration_tapas""": ["""TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TapasConfig"""],
"""tokenization_tapas""": ["""TapasTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
"""TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TapasForMaskedLM""",
"""TapasForQuestionAnswering""",
"""TapasForSequenceClassification""",
"""TapasModel""",
"""TapasPreTrainedModel""",
"""load_tf_weights_in_tapas""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
"""TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFTapasForMaskedLM""",
"""TFTapasForQuestionAnswering""",
"""TFTapasForSequenceClassification""",
"""TFTapasModel""",
"""TFTapasPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
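# With this `_LazyModule` layout, `import transformers.models.tapas` stays cheap:
# the torch/TF submodules above are only imported the first time one of the
# listed names (e.g. `TapasModel`) is actually accessed.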
| 352 |
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
snake_case__ = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCamelCase_ (datasets.BuilderConfig ):
"""simple docstring"""
_lowerCAmelCase = None
_lowerCAmelCase = "utf-8"
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = True # deprecated
_lowerCAmelCase = None # deprecated
_lowerCAmelCase = 1_0 << 2_0 # 10MB
_lowerCAmelCase = None
class UpperCamelCase_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
_lowerCAmelCase = JsonConfig
def _a ( self : int ):
"""simple docstring"""
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
A_ : List[Any] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
def _a ( self : Any , _lowerCamelCase : List[str] ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
A_ : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_lowerCamelCase , (str, list, tuple) ):
A_ : Union[str, Any] = data_files
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : List[str] = [files]
A_ : List[Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
A_ : Tuple = []
for split_name, files in data_files.items():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : int = [files]
A_ : Union[str, Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={'''files''': files} ) )
return splits
def _a ( self : int , _lowerCamelCase : pa.Table ):
"""simple docstring"""
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
A_ : Optional[int] = self.config.features.arrow_schema.field(_lowerCamelCase ).type
A_ : Optional[int] = pa_table.append_column(_lowerCamelCase , pa.array([None] * len(_lowerCamelCase ) , type=_lowerCamelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
A_ : str = table_cast(_lowerCamelCase , self.config.features.arrow_schema )
return pa_table
def _a ( self : List[str] , _lowerCamelCase : int ):
"""simple docstring"""
for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ):
# If the file is a single JSON object and we need to look at the list of items in one specific field
if self.config.field is not None:
with open(_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A_ : int = json.load(_lowerCamelCase )
# We keep only the field we are interested in
A_ : List[str] = dataset[self.config.field]
# We accept two formats: a list of dicts or a dict of lists
if isinstance(_lowerCamelCase , (list, tuple) ):
A_ : int = set().union(*[row.keys() for row in dataset] )
A_ : List[str] = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys}
else:
A_ : Tuple = dataset
A_ : Dict = pa.Table.from_pydict(_lowerCamelCase )
yield file_idx, self._cast_table(_lowerCamelCase )
# If the file has one json object per line
else:
with open(_lowerCamelCase , '''rb''' ) as f:
A_ : int = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
A_ : int = max(self.config.chunksize // 32 , 16 << 10 )
A_ : int = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
A_ : Any = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(_lowerCamelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
A_ : Optional[Any] = batch.decode(self.config.encoding , errors=_lowerCamelCase ).encode('''utf-8''' )
try:
while True:
try:
A_ : List[Any] = paj.read_json(
io.BytesIO(_lowerCamelCase ) , read_options=paj.ReadOptions(block_size=_lowerCamelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(_lowerCamelCase , pa.ArrowInvalid )
and "straddling" not in str(_lowerCamelCase )
or block_size > len(_lowerCamelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f'Batch of {len(_lowerCamelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A_ : Optional[Any] = json.load(_lowerCamelCase )
except json.JSONDecodeError:
logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(_lowerCamelCase , _lowerCamelCase ): # list is the only sequence type supported in JSON
try:
A_ : Optional[int] = set().union(*[row.keys() for row in dataset] )
A_ : Tuple = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys}
A_ : int = pa.Table.from_pydict(_lowerCamelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' )
raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None
yield file_idx, self._cast_table(_lowerCamelCase )
break
else:
logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' )
raise ValueError(
f'Not able to read records in the JSON file at {file}. '
f'You should probably indicate the field of the JSON file containing your records. '
f'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '
f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_lowerCamelCase )
batch_idx += 1
| 4 | 0 |
def snake_case__ ( lowerCamelCase__ : int ) -> int:
if lowerCamelCase__ < 0:
raise ValueError('''Input value must be a positive integer''' )
elif isinstance(lowerCamelCase__ , float ):
raise TypeError('''Input value must be an \'int\' type''' )
return bin(lowerCamelCase__ ).count('''1''' )
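# e.g. snake_case__(25) returns 3, since bin(25) == '0b11001' has three set bits.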
if __name__ == "__main__":
import doctest
doctest.testmod()
| 353 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class UpperCamelCase_ (a__, a__ ):
"""simple docstring"""
_lowerCAmelCase = 'swin'
_lowerCAmelCase = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Any , _lowerCamelCase : Optional[Any]=224 , _lowerCamelCase : List[str]=4 , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Tuple=96 , _lowerCamelCase : List[Any]=[2, 2, 6, 2] , _lowerCamelCase : List[str]=[3, 6, 12, 24] , _lowerCamelCase : List[Any]=7 , _lowerCamelCase : Optional[int]=4.0 , _lowerCamelCase : List[str]=True , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Any=0.0 , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : Tuple=False , _lowerCamelCase : Dict=0.02 , _lowerCamelCase : Optional[Any]=1E-5 , _lowerCamelCase : Any=32 , _lowerCamelCase : Tuple=None , _lowerCamelCase : Any=None , **_lowerCamelCase : str , ):
"""simple docstring"""
super().__init__(**_lowerCamelCase )
A_ : Optional[int] = image_size
A_ : Optional[int] = patch_size
A_ : Optional[int] = num_channels
A_ : Any = embed_dim
A_ : List[Any] = depths
A_ : Any = len(_lowerCamelCase )
A_ : List[Any] = num_heads
A_ : Tuple = window_size
A_ : Tuple = mlp_ratio
A_ : Dict = qkv_bias
A_ : List[str] = hidden_dropout_prob
A_ : List[str] = attention_probs_dropout_prob
A_ : Any = drop_path_rate
A_ : List[Any] = hidden_act
A_ : Tuple = use_absolute_embeddings
A_ : int = layer_norm_eps
A_ : Optional[Any] = initializer_range
A_ : Union[str, Any] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A_ : str = int(embed_dim * 2 ** (len(_lowerCamelCase ) - 1) )
A_ : str = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(_lowerCamelCase ) + 1 )]
A_ ,A_ : Optional[Any] = get_aligned_output_features_output_indices(
out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names )
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = version.parse('1.11' )
@property
def _a ( self : str ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return 1E-4
| 4 | 0 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class UpperCamelCase_ (lowerCAmelCase__ ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = "autoformer"
_lowerCAmelCase : Optional[int] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self : Tuple , _lowerCamelCase : Dict = None , _lowerCamelCase : List[str] = None , _lowerCamelCase : Any = "student_t" , _lowerCamelCase : Optional[int] = "nll" , _lowerCamelCase : str = 1 , _lowerCamelCase : Union[str, Any] = [1, 2, 3, 4, 5, 6, 7] , _lowerCamelCase : Union[str, Any] = True , _lowerCamelCase : Union[str, Any] = 0 , _lowerCamelCase : int = 0 , _lowerCamelCase : Optional[int] = 0 , _lowerCamelCase : int = 0 , _lowerCamelCase : List[str] = None , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : Tuple = 64 , _lowerCamelCase : Dict = 2 , _lowerCamelCase : str = 2 , _lowerCamelCase : Any = 2 , _lowerCamelCase : int = 2 , _lowerCamelCase : int = 32 , _lowerCamelCase : Any = 32 , _lowerCamelCase : int = "gelu" , _lowerCamelCase : List[str] = 0.1 , _lowerCamelCase : str = 0.1 , _lowerCamelCase : Optional[Any] = 0.1 , _lowerCamelCase : Optional[Any] = 0.1 , _lowerCamelCase : Optional[int] = 0.1 , _lowerCamelCase : Dict = 100 , _lowerCamelCase : Optional[Any] = 0.02 , _lowerCamelCase : Union[str, Any] = True , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : List[Any] = 10 , _lowerCamelCase : int = 25 , _lowerCamelCase : Optional[Any] = 3 , **_lowerCamelCase : Union[str, Any] , ):
"""simple docstring"""
A_ : Any = prediction_length
A_ : List[str] = context_length if context_length is not None else prediction_length
A_ : List[str] = distribution_output
A_ : Union[str, Any] = loss
A_ : int = input_size
A_ : List[str] = num_time_features
A_ : str = lags_sequence
A_ : List[str] = scaling
A_ : Union[str, Any] = num_dynamic_real_features
A_ : List[str] = num_static_real_features
A_ : Tuple = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(_SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
A_ : Tuple = cardinality
else:
A_ : Tuple = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(_SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
A_ : Dict = embedding_dimension
else:
A_ : str = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
A_ : Any = num_parallel_samples
# Transformer architecture configuration
A_ : List[str] = input_size * len(self.lags_sequence ) + self._number_of_features
A_ : int = d_model
A_ : Tuple = encoder_attention_heads
A_ : Union[str, Any] = decoder_attention_heads
A_ : Tuple = encoder_ffn_dim
A_ : Any = decoder_ffn_dim
A_ : Dict = encoder_layers
A_ : Dict = decoder_layers
A_ : Optional[int] = dropout
A_ : Optional[int] = attention_dropout
A_ : Optional[Any] = activation_dropout
A_ : Union[str, Any] = encoder_layerdrop
A_ : int = decoder_layerdrop
A_ : str = activation_function
A_ : Optional[Any] = init_std
A_ : Tuple = use_cache
# Autoformer
A_ : Optional[int] = label_length
A_ : Tuple = moving_average
A_ : Optional[Any] = autocorrelation_factor
super().__init__(is_encoder_decoder=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def _a ( self : List[Any] ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 354 |
'''simple docstring'''
from __future__ import annotations
def snake_case__ ( lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> list[int]:
A_ : int = 0
A_ : str = len(lowerCamelCase__ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
A_ : Tuple = i + 1
else:
A_ : List[str] = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{two_pointer([2, 7, 11, 15], 9) = }')
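# Note: the two-pointer scan assumes `nums` is sorted in ascending order; on
# unsorted input it can miss a valid pair, e.g. two_pointer([3, 5, 2, 7], 9)
# returns [] even though nums[2] + nums[3] == 9.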
| 4 | 0 |
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
snake_case__ = """"""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
class UpperCamelCase_ (tr.AbstractTransform ):
"""simple docstring"""
def __init__( self : str , _lowerCamelCase : str = " " ):
"""simple docstring"""
A_ : Optional[Any] = sentence_delimiter
def _a ( self : List[Any] , _lowerCamelCase : str ):
"""simple docstring"""
return list(_lowerCamelCase )
def _a ( self : List[str] , _lowerCamelCase : List[str] ):
"""simple docstring"""
A_ : Tuple = []
for sent_idx, sentence in enumerate(_lowerCamelCase ):
chars.extend(self.process_string(_lowerCamelCase ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(_lowerCamelCase ) - 1:
chars.append(self.sentence_delimiter )
return chars
snake_case__ = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
snake_case__ = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
snake_case__ = """\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"""
snake_case__ = """\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"""
snake_case__ = """\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCamelCase_ (datasets.Metric ):
"""simple docstring"""
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
def _a ( self : Union[str, Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Tuple , _lowerCamelCase : List[str]=False ):
"""simple docstring"""
if concatenate_texts:
return jiwer.compute_measures(
_lowerCamelCase , _lowerCamelCase , truth_transform=_lowerCamelCase , hypothesis_transform=_lowerCamelCase , )["wer"]
A_ : Dict = 0
A_ : Union[str, Any] = 0
for prediction, reference in zip(_lowerCamelCase , _lowerCamelCase ):
A_ : str = jiwer.compute_measures(
_lowerCamelCase , _lowerCamelCase , truth_transform=_lowerCamelCase , hypothesis_transform=_lowerCamelCase , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 355 |
'''simple docstring'''
def snake_case__ ( lowerCamelCase__ : list[int] , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool:
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(lowerCamelCase__ ) )
def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool:
# Base Case
if index == len(lowerCamelCase__ ):
return True
# Recursive Step
for i in range(lowerCamelCase__ ):
if valid_coloring(graph[index] , lowerCamelCase__ , lowerCamelCase__ ):
# Color current vertex
A_ : int = i
# Validate coloring
if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , index + 1 ):
return True
# Backtrack
A_ : str = -1
return False
def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int ) -> list[int]:
A_ : List[str] = [-1] * len(lowerCamelCase__ )
if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , 0 ):
return colored_vertices
return []
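# A minimal usage sketch (using the original names that the call sites above
# reference): a triangle graph needs three colors.
#     graph = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
#     color(graph, 2)  # -> [] (no valid 2-coloring)
#     color(graph, 3)  # -> [0, 1, 2]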
| 4 | 0 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class UpperCamelCase_ (TensorFormatter[Mapping, 'torch.Tensor', Mapping] ):
"""simple docstring"""
def __init__( self : Optional[int] , _lowerCamelCase : List[str]=None , **_lowerCamelCase : int ):
"""simple docstring"""
super().__init__(features=lowerCamelCase__ )
A_ : Any = torch_tensor_kwargs
import torch # noqa import torch at initialization
def _a ( self : Optional[Any] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
import torch
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and column:
if all(
isinstance(lowerCamelCase__ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(lowerCamelCase__ )
return column
def _a ( self : Optional[Any] , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
import torch
if isinstance(lowerCamelCase__ , (str, bytes, type(lowerCamelCase__ )) ):
return value
elif isinstance(lowerCamelCase__ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
A_ : Tuple = {}
if isinstance(lowerCamelCase__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
A_ : int = {'''dtype''': torch.intaa}
elif isinstance(lowerCamelCase__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
A_ : Any = {'''dtype''': torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(lowerCamelCase__ , PIL.Image.Image ):
A_ : Optional[int] = np.asarray(lowerCamelCase__ )
return torch.tensor(lowerCamelCase__ , **{**default_dtype, **self.torch_tensor_kwargs} )
def _a ( self : List[str] , _lowerCamelCase : Any ):
"""simple docstring"""
import torch
# support for torch, tf, jax etc.
if hasattr(lowerCamelCase__ , '''__array__''' ) and not isinstance(lowerCamelCase__ , torch.Tensor ):
A_ : Optional[Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(lowerCamelCase__ , np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(lowerCamelCase__ ) for substruct in data_struct] )
elif isinstance(lowerCamelCase__ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(lowerCamelCase__ ) for substruct in data_struct] )
return self._tensorize(lowerCamelCase__ )
def _a ( self : int , _lowerCamelCase : dict ):
"""simple docstring"""
return map_nested(self._recursive_tensorize , lowerCamelCase__ , map_list=lowerCamelCase__ )
def _a ( self : Union[str, Any] , _lowerCamelCase : pa.Table ):
"""simple docstring"""
A_ : int = self.numpy_arrow_extractor().extract_row(lowerCamelCase__ )
A_ : Optional[int] = self.python_features_decoder.decode_row(lowerCamelCase__ )
return self.recursive_tensorize(lowerCamelCase__ )
def _a ( self : str , _lowerCamelCase : pa.Table ):
"""simple docstring"""
A_ : List[Any] = self.numpy_arrow_extractor().extract_column(lowerCamelCase__ )
A_ : str = self.python_features_decoder.decode_column(lowerCamelCase__ , pa_table.column_names[0] )
A_ : int = self.recursive_tensorize(lowerCamelCase__ )
A_ : Tuple = self._consolidate(lowerCamelCase__ )
return column
def _a ( self : Tuple , _lowerCamelCase : pa.Table ):
"""simple docstring"""
A_ : Dict = self.numpy_arrow_extractor().extract_batch(lowerCamelCase__ )
A_ : Any = self.python_features_decoder.decode_batch(lowerCamelCase__ )
A_ : Tuple = self.recursive_tensorize(lowerCamelCase__ )
for column_name in batch:
A_ : Optional[Any] = self._consolidate(batch[column_name] )
return batch
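# This formatter is what `datasets` selects for `ds.with_format("torch")`, after
# which rows, columns and batches are returned as torch.Tensor where possible.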
| 356 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
snake_case__ = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
snake_case__ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def snake_case__ ( lowerCamelCase__ : list[list[int]] ) -> list[list[int]]:
A_ : str = []
for i in range(len(lowerCamelCase__ ) ):
A_ : Optional[Any] = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
A_ : Optional[int] = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(lowerCamelCase__ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(lowerCamelCase__ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(lowerCamelCase__ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
A_ : List[str] = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(lowerCamelCase__ )
return next_generation
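# e.g. a single step flips the vertical blinker to horizontal:
#     new_generation(BLINKER)  # -> [[0, 0, 0], [1, 1, 1], [0, 0, 0]]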
def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int ) -> list[Image.Image]:
A_ : List[Any] = []
for _ in range(lowerCamelCase__ ):
# Create output image
A_ : Optional[int] = Image.new('''RGB''' , (len(cells[0] ), len(lowerCamelCase__ )) )
A_ : int = img.load()
# Save cells to image
for x in range(len(lowerCamelCase__ ) ):
for y in range(len(cells[0] ) ):
A_ : Optional[Any] = 2_5_5 - cells[y][x] * 2_5_5
A_ : str = (colour, colour, colour)
# Save image
images.append(lowerCamelCase__ )
A_ : Optional[int] = new_generation(lowerCamelCase__ )
return images
if __name__ == "__main__":
snake_case__ = generate_images(GLIDER, 16)
images[0].save("""out.gif""", save_all=True, append_images=images[1:])
| 4 | 0 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def snake_case__ ( ) -> int:
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request('''GET''' , '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 )
@pytest.mark.integration
def snake_case__ ( ) -> str:
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''' , '''https://huggingface.co''' )
def snake_case__ ( ) -> str:
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(ConnectionError ):
http_head('''https://huggingface.co''' )
| 357 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Any = tempfile.mkdtemp()
A_ : List[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
A_ : Tuple = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
A_ : List[Any] = os.path.join(self.tmpdirname , _lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Dict , **_lowerCamelCase : Tuple ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Optional[int] , **_lowerCamelCase : Optional[int] ):
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Optional[Any] , **_lowerCamelCase : Tuple ):
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self : int ):
"""simple docstring"""
A_ : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ : Any = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a ( self : int ):
"""simple docstring"""
A_ : Tuple = self.get_tokenizer()
A_ : Tuple = self.get_rust_tokenizer()
A_ : Dict = self.get_image_processor()
A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
A_ : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase )
A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
A_ : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[str] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ : Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
A_ : Tuple = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 )
A_ : List[str] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = self.get_image_processor()
A_ : Any = self.get_tokenizer()
A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Any = self.prepare_image_inputs()
A_ : List[Any] = image_processor(_lowerCamelCase , return_tensors='''np''' )
A_ : str = processor(images=_lowerCamelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self : Dict ):
"""simple docstring"""
A_ : str = self.get_image_processor()
A_ : List[str] = self.get_tokenizer()
A_ : Optional[int] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : int = '''lower newer'''
A_ : str = processor(text=_lowerCamelCase )
A_ : Dict = tokenizer(_lowerCamelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self : str ):
"""simple docstring"""
A_ : Optional[int] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : List[Any] = '''lower newer'''
A_ : Optional[int] = self.prepare_image_inputs()
A_ : List[Any] = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Optional[Any] = self.get_image_processor()
A_ : Optional[int] = self.get_tokenizer()
A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : str = processor.batch_decode(_lowerCamelCase )
A_ : Union[str, Any] = tokenizer.batch_decode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : str = self.get_image_processor()
A_ : Tuple = self.get_tokenizer()
A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : str = '''lower newer'''
A_ : List[str] = self.prepare_image_inputs()
A_ : Tuple = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 4 | 0 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
snake_case__ = 'naver-clova-ix/donut-base'
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Any = DonutProcessor.from_pretrained(_lowerCamelCase )
def _a ( self : Dict ):
"""simple docstring"""
A_ : Tuple = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
A_ : Tuple = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
A_ : Any = self.processor.tokenajson(_lowerCamelCase )
self.assertDictEqual(_lowerCamelCase , _lowerCamelCase )
| 358 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = """▁"""
snake_case__ = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
}
snake_case__ = {
"""vocab_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"""
),
},
"""spm_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"""
)
},
}
snake_case__ = {
"""facebook/s2t-small-librispeech-asr""": 10_24,
}
snake_case__ = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""]
snake_case__ = {"""mustc""": MUSTC_LANGS}
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = MAX_MODEL_INPUT_SIZES
_lowerCAmelCase = ['input_ids', 'attention_mask']
_lowerCAmelCase = []
def __init__( self : Optional[int] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : str="<s>" , _lowerCamelCase : Union[str, Any]="</s>" , _lowerCamelCase : Dict="<pad>" , _lowerCamelCase : str="<unk>" , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : int=False , _lowerCamelCase : Any=None , _lowerCamelCase : Any=None , _lowerCamelCase : Optional[Dict[str, Any]] = None , **_lowerCamelCase : Optional[int] , ):
"""simple docstring"""
A_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , do_upper_case=_lowerCamelCase , do_lower_case=_lowerCamelCase , tgt_lang=_lowerCamelCase , lang_codes=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
A_ : Optional[int] = do_upper_case
A_ : Tuple = do_lower_case
A_ : Tuple = load_json(_lowerCamelCase )
A_ : Tuple = {v: k for k, v in self.encoder.items()}
A_ : List[Any] = spm_file
A_ : List[str] = load_spm(_lowerCamelCase , self.sp_model_kwargs )
if lang_codes is not None:
A_ : Any = lang_codes
A_ : Optional[Any] = LANGUAGES[lang_codes]
A_ : Optional[Any] = [f'<lang:{lang}>' for lang in self.langs]
A_ : Union[str, Any] = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs}
A_ : Optional[int] = self.lang_tokens
A_ : int = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
A_ : Dict = {}
@property
def _a ( self : Tuple ):
"""simple docstring"""
return len(self.encoder )
@property
def _a ( self : int ):
"""simple docstring"""
return self._tgt_lang
@tgt_lang.setter
def _a ( self : List[str] , _lowerCamelCase : Any ):
"""simple docstring"""
A_ : int = new_tgt_lang
self.set_tgt_lang_special_tokens(_lowerCamelCase )
def _a ( self : Tuple , _lowerCamelCase : str ):
"""simple docstring"""
A_ : List[str] = self.lang_code_to_id[tgt_lang]
A_ : Optional[Any] = [lang_code_id]
def _a ( self : Optional[Any] , _lowerCamelCase : str ):
"""simple docstring"""
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def _a ( self : List[Any] , _lowerCamelCase : int ):
"""simple docstring"""
return self.encoder.get(_lowerCamelCase , self.encoder[self.unk_token] )
def _a ( self : int , _lowerCamelCase : int ):
"""simple docstring"""
return self.decoder.get(_lowerCamelCase , self.unk_token )
def _a ( self : int , _lowerCamelCase : List[str] ):
"""simple docstring"""
A_ : List[Any] = []
A_ : Any = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
A_ : Union[str, Any] = self.sp_model.decode(_lowerCamelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
A_ : Optional[Any] = []
else:
current_sub_tokens.append(_lowerCamelCase )
A_ : Tuple = self.sp_model.decode(_lowerCamelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def _a ( self : int , _lowerCamelCase : Dict , _lowerCamelCase : Any=None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def _a ( self : List[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
A_ : Tuple = [1] * len(self.prefix_tokens )
A_ : Tuple = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones
def _a ( self : Dict ):
"""simple docstring"""
A_ : Union[str, Any] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = self.__dict__.copy()
A_ : List[Any] = None
return state
def __setstate__( self : List[str] , _lowerCamelCase : Dict ):
"""simple docstring"""
A_ : Dict = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
A_ : Optional[int] = {}
A_ : int = load_spm(self.spm_file , self.sp_model_kwargs )
def _a ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
"""simple docstring"""
A_ : Dict = Path(_lowerCamelCase )
assert save_dir.is_dir(), f'{save_directory} should be a directory'
A_ : Optional[int] = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
A_ : Optional[int] = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , _lowerCamelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _lowerCamelCase )
elif not os.path.isfile(self.spm_file ):
with open(_lowerCamelCase , '''wb''' ) as fi:
A_ : List[str] = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (str(_lowerCamelCase ), str(_lowerCamelCase ))
def load_spm( path: str , sp_model_kwargs: Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm


def load_json( path: str ) -> Union[Dict, List]:
    with open(path , 'r' ) as f:
        return json.load(f )


def save_json( data , path: str ) -> None:
    with open(path , 'w' ) as f:
        json.dump(data , f , indent=2 )
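# Hedged usage sketch (added for illustration; the file path is hypothetical):
#     save_json({'<unk>': 0, 'hello': 1}, '/tmp/vocab_demo.json')
#     assert load_json('/tmp/vocab_demo.json') == {'<unk>': 0, 'hello': 1}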
| 4 | 0 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
    # Load configuration defined in the metadata file
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata['model_config'] )
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location='cpu' )
    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path )
    tokenizer = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_a = AddedToken('<ent>' , lstrip=False , rstrip=False )
    entity_token_b = AddedToken('<ent2>' , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_b]} )
    config.vocab_size += 2
    print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    with open(os.path.join(pytorch_dump_folder_path , LukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
        json.dump(entity_vocab , f )
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    # Initialize the embeddings of the special tokens
    word_emb = state_dict['embeddings.word_embeddings.weight']
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(['@'] )[0]].unsqueeze(0 )
    enta_emb = word_emb[tokenizer.convert_tokens_to_ids(['#'] )[0]].unsqueeze(0 )
    state_dict['embeddings.word_embeddings.weight'] = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f'encoder.layer.{layer_index}.attention.self.'
            state_dict[prefix + 'w2e_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2w_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2e_' + matrix_name] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['entity_embeddings.entity_embeddings.weight']
    entity_emb[entity_vocab['[MASK2]']] = entity_emb[entity_vocab['[MASK]']]
    model = LukeModel(config=config ).eval()
    missing_keys, unexpected_keys = model.load_state_dict(state_dict , strict=False )
    if not (len(missing_keys ) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f'Missing keys {", ".join(missing_keys )}. Expected only missing embeddings.position_ids' )
    if not (all(key.startswith('entity_predictions' ) or key.startswith('lm_head' ) for key in unexpected_keys )):
        raise ValueError(
            'Unexpected keys'
            f' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path , task='entity_classification' )
    text = (
        'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'
        ' new world number one avoid a humiliating second- round exit at Wimbledon .'
    )
    span = (39, 42)
    encoding = tokenizer(text , entity_spans=[span] , add_prefix_space=True , return_tensors='pt' )
    outputs = model(**encoding )
# Verify word hidden states
if model_size == "large":
A_ : List[str] = torch.Size((1, 4_2, 1_0_2_4) )
A_ : Tuple = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
A_ : Any = torch.Size((1, 4_2, 7_6_8) )
A_ : Dict = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , a__ , atol=1e-4 ):
raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024) )
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]] )
    else:  # base
        expected_shape = torch.Size((1, 1, 768) )
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
            f' {expected_shape}' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
        raise ValueError
# Finally, save our PyTorch model and tokenizer
    print('Saving PyTorch model to {}'.format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )
def load_entity_vocab( entity_vocab_path ) -> dict:
    entity_vocab = {}
    with open(entity_vocab_path , 'r' , encoding='utf-8' ) as f:
        for index, line in enumerate(f ):
            title, _ = line.rstrip().split('\t' )
            entity_vocab[title] = index
    return entity_vocab
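# Illustrative note (added): each line of entity_vocab.tsv is expected to look
# like "Ana_Ivanovic\t12345" — an entity title and a count separated by a tab —
# so the split above keeps the title and maps it to its zero-based line index.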
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 359 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field( default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
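# Example (added for illustration): mutable defaults on dataclass fields must go
# through default_factory, which is exactly what list_field wraps:
#     foo_str: List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'])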
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default='toto' , metadata={'help': 'help message'} )


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = 'titi'
    toto = 'toto'


class MixedTypeEnum(Enum):
    titi = 'titi'
    toto = 'toto'
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = 'toto'

    def __post_init__( self ):
        self.foo = BasicEnum(self.foo )


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = 'toto'

    def __post_init__( self ):
        self.foo = MixedTypeEnum(self.foo )


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None , metadata={'help': 'help message'} )
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[] )
    des: Optional[List[int]] = list_field(default=[] )


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[] )
    bar_int: List[int] = list_field(default=[1, 2, 3] )
    foo_str: List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3] )


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__( self ):
        self.required_enum = BasicEnum(self.required_enum )


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default='toto' , metadata={'help': 'help message'} )
    foo_str: "List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] )


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None , metadata={'help': 'help message'} )
        baz: str | None = None
        ces: list[str] | None = list_field(default=[] )
        des: list[int] | None = list_field(default=[] )
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
    def argparsersEqual( self , a: argparse.ArgumentParser , b: argparse.ArgumentParser ):
        """simple docstring"""
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            xx = {k: v for k, v in vars(x ).items() if k != 'container'}
            yy = {k: v for k, v in vars(y ).items() if k != 'container'}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('choices' , None ) and yy.get('choices' , None ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['type'](expected_choice ) , yy['type'](expected_choice ) )
                del xx["type"], yy["type"]
            self.assertEqual(xx , yy )
def _a ( self : Optional[int] ):
"""simple docstring"""
        parser = HfArgumentParser(BasicExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo' , type=int , required=True )
        expected.add_argument('--bar' , type=float , required=True )
        expected.add_argument('--baz' , type=str , required=True )
        expected.add_argument('--flag' , type=string_to_bool , default=False , const=True , nargs='?' )
        self.argparsersEqual(parser , expected )
        args = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
        (example,) = parser.parse_args_into_dataclasses(args , look_for_args_file=False )
        self.assertFalse(example.flag )
    def _a ( self : Dict ):
        """simple docstring"""
        parser = HfArgumentParser(WithDefaultExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo' , default=42 , type=int )
        expected.add_argument('--baz' , default='toto' , type=str , help='help message' )
        self.argparsersEqual(parser , expected )
    def _a ( self : Dict ):
        """simple docstring"""
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo' , type=string_to_bool , default=False , const=True , nargs='?' )
        expected.add_argument('--baz' , type=string_to_bool , default=True , const=True , nargs='?' )
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument('--no_baz' , action='store_false' , default=False , dest='baz' )
        expected.add_argument('--opt' , type=string_to_bool , default=None )
        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604 )
        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type )
            self.argparsersEqual(parser , expected )
            args = parser.parse_args([] )
            self.assertEqual(args , Namespace(foo=False , baz=True , opt=None ) )
            args = parser.parse_args(['--foo', '--no_baz'] )
            self.assertEqual(args , Namespace(foo=True , baz=False , opt=None ) )
            args = parser.parse_args(['--foo', '--baz'] )
            self.assertEqual(args , Namespace(foo=True , baz=True , opt=None ) )
            args = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] )
            self.assertEqual(args , Namespace(foo=True , baz=True , opt=True ) )
            args = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] )
            self.assertEqual(args , Namespace(foo=False , baz=False , opt=False ) )
    def _a ( self : List[Any] ):
        """simple docstring"""
        parser = HfArgumentParser(MixedTypeEnumExample )
        expected = argparse.ArgumentParser()
        expected.add_argument(
            '--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , )
        self.argparsersEqual(parser , expected )
        args = parser.parse_args([] )
        self.assertEqual(args.foo , 'toto' )
        enum_ex = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
        args = parser.parse_args(['--foo', 'titi'] )
        self.assertEqual(args.foo , 'titi' )
        enum_ex = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
        args = parser.parse_args(['--foo', '42'] )
        self.assertEqual(args.foo , 42 )
        enum_ex = parser.parse_args_into_dataclasses(['--foo', '42'] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
    def _a ( self : Optional[int] ):
        """simple docstring"""

        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample )
        expected = argparse.ArgumentParser()
        expected.add_argument(
            '--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , )
        self.argparsersEqual(parser , expected )
        args = parser.parse_args([] )
        self.assertEqual(args.foo , 'toto' )
        args = parser.parse_args(['--foo', 'titi'] )
        self.assertEqual(args.foo , 'titi' )
        args = parser.parse_args(['--foo', '42'] )
        self.assertEqual(args.foo , 42 )
    def _a ( self : Dict ):
        """simple docstring"""
        parser = HfArgumentParser(ListExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo_int' , nargs='+' , default=[] , type=int )
        expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=int )
        expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=str )
        expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=float )
        self.argparsersEqual(parser , expected )
        args = parser.parse_args([] )
        self.assertEqual(
            args , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )
        args = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
        self.assertEqual(args , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )
    def _a ( self : Dict ):
        """simple docstring"""
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo' , default=None , type=int )
        expected.add_argument('--bar' , default=None , type=float , help='help message' )
        expected.add_argument('--baz' , default=None , type=str )
        expected.add_argument('--ces' , nargs='+' , default=[] , type=str )
        expected.add_argument('--des' , nargs='+' , default=[] , type=int )
        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604 )
        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type )
            self.argparsersEqual(parser , expected )
            args = parser.parse_args([] )
            self.assertEqual(args , Namespace(foo=None , bar=None , baz=None , ces=[] , des=[] ) )
            args = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
            self.assertEqual(args , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )
    def _a ( self : List[Any] ):
        """simple docstring"""
        parser = HfArgumentParser(RequiredExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('--required_list' , nargs='+' , type=int , required=True )
        expected.add_argument('--required_str' , type=str , required=True )
        expected.add_argument(
            '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=True , )
        self.argparsersEqual(parser , expected )
    def _a ( self : Optional[Any] ):
        """simple docstring"""
        parser = HfArgumentParser(StringLiteralAnnotationExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo' , type=int , required=True )
        expected.add_argument(
            '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=True , )
        expected.add_argument('--opt' , type=string_to_bool , default=None )
        expected.add_argument('--baz' , default='toto' , type=str , help='help message' )
        expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=str )
        self.argparsersEqual(parser , expected )
    def _a ( self : Tuple ):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample )
        args_dict = {
            'foo': 12,
            'bar': 3.14,
            'baz': '42',
            'flag': True,
        }
        parsed_args = parser.parse_dict(args_dict )[0]
        args = BasicExample(**args_dict )
        self.assertEqual(parsed_args , args )
    def _a ( self : List[str] ):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample )
        args_dict = {
            'foo': 12,
            'bar': 3.14,
            'baz': '42',
            'flag': True,
            'extra': 42,
        }
        self.assertRaises(ValueError , parser.parse_dict , args_dict , allow_extra_keys=False )
    def _a ( self : Optional[Any] ):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample )
        args_dict_for_json = {
            'foo': 12,
            'bar': 3.14,
            'baz': '42',
            'flag': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir , 'temp_json' )
            os.mkdir(temp_local_path )
            with open(temp_local_path + '.json' , 'w+' ) as f:
                json.dump(args_dict_for_json , f )
            parsed_args = parser.parse_json_file(Path(temp_local_path + '.json' ) )[0]
            json_example = BasicExample(**args_dict_for_json )
            self.assertEqual(parsed_args , json_example )
    def _a ( self : int ):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample )
        args_dict_for_yaml = {
            'foo': 12,
            'bar': 3.14,
            'baz': '42',
            'flag': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir , 'temp_yaml' )
            os.mkdir(temp_local_path )
            with open(temp_local_path + '.yaml' , 'w+' ) as f:
                yaml.dump(args_dict_for_yaml , f )
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]
            yaml_example = BasicExample(**args_dict_for_yaml )
            self.assertEqual(parsed_args , yaml_example )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
        parser = HfArgumentParser(TrainingArguments )
        self.assertIsNotNone(parser )
| 4 | 0 |
'''simple docstring'''
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative( img ):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h ):
        for j in range(pixel_v ):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
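# Note (added): `[255, 255, 255] - img[i][j]` relies on NumPy broadcasting, so
# the double loop above can equivalently be written as the vectorized
# expression `img = 255 - img`.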
if __name__ == "__main__":
# read original image
    img = imread('image_data/lena.jpg', 1)
# convert to its negative
    neg = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
| 360 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir('fixtures')
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : List[str] ):
"""simple docstring"""
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request' , return_value=response_mock ) as mock_head:
A_ : List[Any] = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
            # This check makes sure the fake head request was actually called
mock_head.assert_called()
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Tuple = ViTImageProcessor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
def _a ( self : Dict ):
"""simple docstring"""
with self.assertRaises(_lowerCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
A_ : Any = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
A_ : Tuple = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' )
self.assertIsNotNone(_lowerCamelCase )
@is_staging_test
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
@classmethod
def _a ( cls : Tuple ):
"""simple docstring"""
A_ : int = TOKEN
HfFolder.save_token(_lowerCamelCase )
@classmethod
def _a ( cls : str ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
def _a ( self : List[Any] ):
"""simple docstring"""
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub('test-image-processor' , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir , repo_id='test-image-processor' , push_to_hub=True , use_auth_token=self._token )
            new_image_processor = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v , getattr(new_image_processor , k ) )
def _a ( self : Optional[Any] ):
"""simple docstring"""
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub('valid_org/test-image-processor' , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained('valid_org/test-image-processor' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir , repo_id='valid_org/test-image-processor-org' , push_to_hub=True , use_auth_token=self._token )
            new_image_processor = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org' )
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v , getattr(new_image_processor , k ) )
def _a ( self : Optional[Any] ):
"""simple docstring"""
CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , )
        new_image_processor = AutoImageProcessor.from_pretrained(
            f'{USER}/test-dynamic-image-processor' , trust_remote_code=True )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
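        # Note (added): trust_remote_code=True is required because the processor
        # class lives in the Hub repo's own code rather than in the transformers
        # package, which is also why only the class name (not isinstance) is checked.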
| 4 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase_ (PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'output_type',
            'return_dict',
            'callback',
            'callback_steps',
        ] )
    @property
    def dummy_uncond_unet( self ):
        """simple docstring"""
        unet = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet' , )
        return unet
    @property
    def dummy_cond_unet( self ):
        """simple docstring"""
        unet = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
        return unet
    def get_dummy_components( self , class_cond=False ):
        """simple docstring"""
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet
        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        components = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'batch_size': 1,
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'generator': generator,
            'output_type': 'np',
        }
        return inputs
    def _a ( self : Dict ):
        """simple docstring"""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _a ( self : str ):
        """simple docstring"""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True )
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['class_labels'] = 0
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _a ( self : str ):
        """simple docstring"""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _a ( self : List[Any] ):
        """simple docstring"""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True )
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        inputs['class_labels'] = 0
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , seed=0 , get_fixed_latents=False , device='cpu' , dtype=torch.float16 , shape=(1, 3, 64, 64) ):
        """simple docstring"""
        generator = torch.manual_seed(seed )
        inputs = {
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'class_labels': 0,
            'generator': generator,
            'output_type': 'np',
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed , device=device , dtype=dtype , shape=shape )
            inputs['latents'] = latents
        return inputs
    def get_fixed_latents( self , seed=0 , device='cpu' , dtype=torch.float16 , shape=(1, 3, 64, 64) ):
        """simple docstring"""
        if type(device ) == str:
            device = torch.device(device )
        generator = torch.Generator(device=device ).manual_seed(seed )
        latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        return latents
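    # Note (added): pinning the latent tensor itself (not just the RNG seed)
    # keeps the fp16 flash-attention comparisons below reproducible across
    # different device generators.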
    def _a ( self : Union[str, Any] ):
        """simple docstring"""
        unet = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs()
        image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    def _a ( self : int ):
        """simple docstring"""
        unet = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs()
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
@require_torch_a
    def _a ( self : Optional[int] ):
        """simple docstring"""
        unet = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device , torch_dtype=torch.float16 )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(get_fixed_latents=True , device=torch_device )
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True , enable_math=False , enable_mem_efficient=False ):
            image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@require_torch_a
    def _a ( self : Union[str, Any] ):
        """simple docstring"""
        unet = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device , torch_dtype=torch.float16 )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(get_fixed_latents=True , device=torch_device )
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True , enable_math=False , enable_mem_efficient=False ):
            image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 361 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase_ (PipelineTool ):
"""simple docstring"""
    description = (
        'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. '
        'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements that should be identified in the segmentation mask. The tool returns the mask.'
    )
    default_checkpoint = 'CIDAS/clipseg-rd64-refined'
    name = 'image_segmenter'
    model_class = CLIPSegForImageSegmentation
    inputs = ['image', 'text']
    outputs = ['image']
def __init__( self : Optional[int] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ['''vision'''] )
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
    def encode( self , image: "Image" , label: str ):
        """simple docstring"""
        return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors='pt' )
    def forward( self , inputs ):
        """simple docstring"""
        with torch.no_grad():
            logits = self.model(**inputs ).logits
        return logits
    def decode( self , outputs ):
        """simple docstring"""
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
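    # Hedged usage sketch (added; the keyword names follow `inputs` above, the
    # file paths are hypothetical):
    #
    #     tool = UpperCamelCase_()
    #     mask = tool(image=Image.open('photo.png'), label='cat')
    #     mask.save('cat_mask.png')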
| 4 | 0 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
snake_case__ = logging.getLogger(__name__)
@dataclass
class UpperCamelCase_ (TrainingArguments ):
    """simple docstring"""

    label_smoothing: Optional[float] = field(
        default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
    sortish_sampler: bool = field(default=False , metadata={'help': 'Whether to SortishSamler or not.'} )
    predict_with_generate: bool = field(
        default=False , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
    adafactor: bool = field(default=False , metadata={'help': 'whether to use adafactor'} )
    encoder_layerdrop: Optional[float] = field(
        default=None , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
    decoder_layerdrop: Optional[float] = field(
        default=None , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
    dropout: Optional[float] = field(default=None , metadata={'help': 'Dropout probability. Goes into model.config.'} )
    attention_dropout: Optional[float] = field(
        default=None , metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
    lr_scheduler: Optional[str] = field(
        default='linear' , metadata={'help': f'Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'} , )
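# Example (added for illustration): HfArgumentParser turns these fields into
# CLI flags; a hypothetical invocation would be
#     python finetune_trainer.py --label_smoothing 0.1 --sortish_sampler --lr_scheduler cosine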
| 362 |
'''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum( arr: Sequence[float] , allow_empty_subarrays: bool = False ) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('-inf' )
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num , curr_sum + num )
        max_sum = max(max_sum , curr_sum )
    return max_sum
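# Worked example (added): for [-2, 1, -3, 4, -1, 2, 1, -5, 4] the recurrence
# above settles on the subarray [4, -1, 2, 1], so max_subarray_sum returns 6.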
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'{max_subarray_sum(nums) = }')
| 4 | 0 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt( args ):
    parameter_file = os.path.join(args.tf_model_dir , 'parameters.json' )
    params = json.loads(open(parameter_file ).read() )
    if not params:
        raise ValueError(
            f'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' )
    if not args.output.endswith('.pt' ):
        args.output = args.output + '.pt'
    new_state = OrderedDict()
    with tf.device('/CPU:0' ):
        reader = tf.train.load_checkpoint(args.tf_model_dir )
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name ).astype(np.float16 )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
A_ : Optional[Any] = int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
A_ : Tuple = 8
A_ : Optional[int] = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
A_ : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A_ : Tuple = torch.tensor(__snake_case )
elif key_name.startswith('''model/moe''' ):
A_ : Union[str, Any] = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
A_ : Tuple = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
A_ : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A_ : Optional[Any] = torch.tensor(__snake_case )
elif key_name.endswith('''/softmlp/kernel''' ):
A_ : Optional[int] = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
A_ : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A_ : str = torch.tensor(__snake_case )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
A_ : Tuple = key_name[-9:-7]
for i in range(1_6 ):
A_ : Optional[int] = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
A_ : List[Any] = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
A_ : Dict = torch.tensor(__snake_case )
elif key_name.startswith('''model/mlp''' ):
A_ : Optional[int] = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
A_ : Any = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
A_ : Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A_ : Dict = torch.tensor(__snake_case )
elif key_name.endswith('''/p1/bias''' ):
A_ : Tuple = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
A_ : Optional[Any] = vnp.copy() # same because it is one dimensional
A_ : Optional[Any] = torch.tensor(__snake_case )
elif key_name.endswith('''/p2/kernel''' ):
A_ : List[Any] = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
A_ : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A_ : Tuple = torch.tensor(__snake_case )
elif key_name.endswith('''/p2/bias''' ):
A_ : List[Any] = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
A_ : Union[str, Any] = vnp.copy() # same because it is one dimensional
A_ : Tuple = torch.tensor(__snake_case )
elif key_name.startswith('''model/ln''' ):
A_ : Optional[Any] = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
A_ : List[str] = "model.blocks.%d.feed_forward.norm.bias" % player
A_ : List[str] = vnp.copy() # same because it is one dimensional
A_ : Tuple = torch.tensor(__snake_case )
elif key_name.endswith('''/g''' ):
A_ : Optional[int] = "model.blocks.%d.feed_forward.norm.weight" % player
A_ : Optional[int] = vnp.copy() # same because it is one dimensional
A_ : List[str] = torch.tensor(__snake_case )
elif key_name.startswith('''model/att''' ):
A_ : Optional[Any] = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
A_ : int = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
A_ : str = state[:, 0, :, :]
A_ : Union[str, Any] = state[:, 1, :, :]
A_ : List[Any] = state[:, 2, :, :]
A_ : List[Any] = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
A_ : str = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
A_ : int = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
A_ : Dict = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
A_ : Tuple = torch.tensor(__snake_case )
A_ : Union[str, Any] = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
A_ : str = torch.tensor(__snake_case )
A_ : int = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
A_ : Union[str, Any] = torch.tensor(__snake_case )
elif key_name.endswith('''/o/kernel''' ):
A_ : Tuple = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
A_ : List[str] = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
A_ : str = torch.tensor(__snake_case )
elif key_name.startswith('''model/an''' ):
A_ : Dict = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
A_ : str = "model.blocks.%d.self_attn.norm.bias" % player
A_ : str = vnp.copy() # same because it is one dimensional
A_ : Optional[int] = torch.tensor(__snake_case )
elif key_name.endswith('''/g''' ):
A_ : str = "model.blocks.%d.self_attn.norm.weight" % player
A_ : Dict = vnp.copy() # same because it is one dimensional
A_ : List[Any] = torch.tensor(__snake_case )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
A_ : Optional[Any] = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
A_ : Dict = "model.%s.weight" % nlayer
A_ : List[Any] = vnp.copy() # same in embedded
A_ : Tuple = torch.tensor(__snake_case )
if key_name.startswith('''model/wte''' ):
A_ : Any = "lm_head.weight"
A_ : Optional[int] = vnp.copy() # same in embedded
A_ : str = torch.tensor(__snake_case )
elif key_name.startswith('''model/wob''' ):
A_ : Any = "final_logits_bias"
A_ : Union[str, Any] = vnp.copy() # same in embedded
A_ : Dict = state.reshape((1, -1) )
A_ : int = torch.tensor(__snake_case )
elif key_name == "model/dense/kernel":
A_ : Optional[int] = "model.last_project.weight"
A_ : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A_ : List[Any] = torch.tensor(__snake_case )
elif key_name == "model/dense_1/bias":
A_ : Optional[int] = "model.last_project.bias"
A_ : Tuple = vnp.copy() # same because it is one dimensional
A_ : Tuple = torch.tensor(__snake_case )
    torch.save(new_state , args.output )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='model converter.' , formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
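# Example invocation (added for illustration; the script name and paths are
# hypothetical):
#   python convert_tf_gptsan_to_pt.py --tf_model_dir ./gptsan_tf_ckpt --output gptsan.pt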
| 363 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class UpperCamelCase_ (PretrainedConfig ):
"""simple docstring"""
    model_type = 'speech_to_text_2'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self , vocab_size=10000 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=4 , decoder_layerdrop=0.0 , use_cache=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_target_positions=1024 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
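    # Hedged usage sketch (added): with no arguments the defaults above mirror
    # `Speech2Text2Config()` from transformers; in this dump the class is only
    # reachable under its mangled name:
    #
    #     config = UpperCamelCase_()
    #     assert config.d_model == 256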
| 4 | 0 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'''kwargs, expected''' , [
({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
({'''num_shards''': 1_0, '''max_num_jobs''': 1}, [range(1_0 )]),
        ({'''num_shards''': 1_0, '''max_num_jobs''': 1_0}, [range(i , i + 1 ) for i in range(1_0 )]),
({'''num_shards''': 1, '''max_num_jobs''': 1_0}, [range(1 )]),
({'''num_shards''': 1_0, '''max_num_jobs''': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 1_0 )]),
({'''num_shards''': 3, '''max_num_jobs''': 1_0}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards( kwargs , expected ):
    out = _distribute_shards(**kwargs )
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, max_num_jobs, expected''' , [
({'''foo''': 0}, 1_0, [{'''foo''': 0}]),
({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
] , )
def test_split_gen_kwargs( gen_kwargs , max_num_jobs , expected ):
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs )
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, expected''' , [
({'''foo''': 0}, 1),
({'''shards''': [0]}, 1),
({'''shards''': [0, 1, 2, 3]}, 4),
({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs( gen_kwargs , expected ):
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
        assert out == expected
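# Worked example (added): distributing 10 shards over max_num_jobs=3 yields the
# contiguous ranges range(0, 4), range(4, 7), range(7, 10), exactly as in the
# parametrized case above.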
| 364 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class UpperCamelCase_ (PretrainedConfig ):
"""simple docstring"""
    model_type = 'table-transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        """simple docstring"""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get('model_type' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
    def num_attention_heads( self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
    def hidden_size( self ):
"""simple docstring"""
return self.d_model
class UpperCamelCase_ (OnnxConfig ):
"""simple docstring"""
    torch_onnx_minimum_version = version.parse('1.11' )
@property
    def inputs( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
    def atol_for_validation( self ):
"""simple docstring"""
return 1E-5
@property
    def default_onnx_opset( self ):
"""simple docstring"""
return 12
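    # Note (added): the 1e-5 and 12 returned above are the validation tolerance
    # and default ONNX opset consumed by the transformers ONNX export checks
    # for this DETR-style config.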
| 4 | 0 |
'''simple docstring'''
from collections import defaultdict
def check_anagrams( first_str: str , second_str: str ) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(' ' , '' )
    second_str = second_str.replace(' ' , '' )
    # Strings of different lengths are not anagrams
    if len(first_str ) != len(second_str ):
        return False
    # Default values for count should be 0
    count = defaultdict(int )
    # For each character in the input strings, increment the count for the
    # first string and decrement it for the second
    for i in range(len(first_str ) ):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input("""Enter the first string """).strip()
    input_b = input("""Enter the second string """).strip()
    status = check_anagrams(input_a , input_b )
print(F'{input_a} and {input_b} are {"" if status else "not "}anagrams.')
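    # Deterministic sanity checks (sketch): anagrams are case- and space-insensitive.
    assert check_anagrams('''Silent''' , '''Listen''' )
    assert check_anagrams('''This is a string''' , '''Is this a string''' )
    assert not check_anagrams('''There''' , '''Their''' )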
| 365 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[8, 16, 32, 64] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , num_groups=1 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        """simple docstring"""
        return BitConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = BitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = BitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        """simple docstring"""
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase_ (ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = BitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BitConfig , has_text_modality=False )
    def test_config( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
    def test_attention_outputs( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
    def test_model_common_attributes( self ):
"""simple docstring"""
pass
    def test_forward_signature( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def test_initialization( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config )
            for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
    def test_hidden_states_output( self ):
        """simple docstring"""
        def check_hidden_states_output( inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''preactivation''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['''output_hidden_states'''] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
    def test_feed_forward_chunking( self ):
"""simple docstring"""
pass
    def test_for_image_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
    def test_inference_image_classification_head( self ):
        """simple docstring"""
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@require_torch
class UpperCamelCase_ (BackboneTesterMixin, unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = BitModelTester(self )
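# Standalone sketch of the downsampling contract the backbone tests assert above:
# a stride-32 stem maps (B, C, H, W) -> (B, hidden, H // 32, W // 32).
def _toy_backbone_shape_demo():
    pixel_values = floats_tensor([2, 3, 64, 64] )
    stem = nn.Conv2d(3 , 8 , kernel_size=32 , stride=32 )  # stand-in, not BitModel
    feature_map = stem(pixel_values )
    assert list(feature_map.shape[-2:] ) == [64 // 32, 64 // 32]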
| 4 | 0 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
snake_case__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class UpperCamelCase_ (Pipeline ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        requires_backends(self , '''vision''' )
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING )
    def __call__( self , images : Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
        """simple docstring"""
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        """simple docstring"""
        return {}, {}, {}
    def preprocess( self , image ):
        """simple docstring"""
        image = load_image(image )
        self.image_size = image.size
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        """simple docstring"""
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs ):
        """simple docstring"""
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=False )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output )).astype('''uint8''' )
        depth = Image.fromarray(formatted )
        output_dict = {}
        output_dict['''predicted_depth'''] = predicted_depth
        output_dict['''depth'''] = depth
        return output_dict
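if __name__ == "__main__":
    # Usage sketch (assumption: this class backs the "depth-estimation" pipeline
    # task; running it downloads a checkpoint such as Intel/dpt-large).
    from transformers import pipeline
    depth_estimator = pipeline('''depth-estimation''' , model='''Intel/dpt-large''' )
    result = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
    print(result['''predicted_depth'''].shape , result['''depth'''].size )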
| 366 |
'''simple docstring'''
import pprint
import requests
API_ENDPOINT_URL = """https://zenquotes.io/api"""
def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
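    # Response shape note (sketch): each element is a dict with keys "q" (quote),
    # "a" (author) and "h" (HTML rendering), per the zenquotes.io API docs.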
| 4 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
snake_case__ = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class UpperCamelCase_ (PretrainedConfig ):
"""simple docstring"""
    model_type = """deta"""
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }
    def __init__( self , backbone_config=None , num_queries=900 , max_position_embeddings=2048 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=1024 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , num_feature_levels=5 , encoder_n_points=4 , decoder_n_points=4 , two_stage=True , two_stage_num_proposals=300 , with_box_refine=True , assign_first_stage=True , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , **kwargs , ):
        """simple docstring"""
        if backbone_config is None:
            logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
            backbone_config = CONFIG_MAPPING["""resnet"""](out_features=['''stage2''', '''stage3''', '''stage4'''] )
        else:
            if isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.pop('''model_type''' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
    def num_attention_heads( self ):
        """simple docstring"""
        return self.encoder_attention_heads
    @property
    def hidden_size( self ):
        """simple docstring"""
        return self.d_model
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
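if __name__ == "__main__":
    # Round-trip sketch: the serialized dict nests the backbone config and
    # records the model type (assumption: mirrors transformers' DetaConfig).
    config = UpperCamelCase_()
    serialized = config.to_dict()
    print(serialized['''model_type'''] , serialized['''backbone_config''']['''model_type'''] )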
| 367 |
'''simple docstring'''
from __future__ import annotations
class UpperCamelCase_ :
"""simple docstring"""
    def __init__( self , order : int ):
        """simple docstring"""
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
    def set_coefficients( self , a_coeffs : list[float] , b_coeffs : list[float] ):
        """simple docstring"""
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs ) != self.order + 1:
            error_msg = (
                f'Expected a_coeffs to have {self.order + 1} elements '
                f'for {self.order}-order filter, got {len(a_coeffs )}'
            )
            raise ValueError(error_msg )
        if len(b_coeffs ) != self.order + 1:
            error_msg = (
                f'Expected b_coeffs to have {self.order + 1} elements '
                f'for {self.order}-order filter, got {len(b_coeffs )}'
            )
            raise ValueError(error_msg )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process( self , sample : float ):
        """simple docstring"""
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
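if __name__ == "__main__":
    # Usage sketch: a first-order smoothing section. The coefficients below are
    # an arbitrary illustrative choice (an assumption), not a designed filter.
    iir = UpperCamelCase_(order=1 )
    iir.set_coefficients([1.0, -0.9] , [0.05, 0.05] )
    smoothed = [iir.process(s ) for s in (0.0, 1.0, 1.0, 1.0, 1.0)]
    print(smoothed )  # ramps toward the unit steady-state value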
| 4 | 0 |
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
payload = [
    {
        """type""": """header""",
        """text""": {
            """type""": """plain_text""",
            """text""": F'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results',
            """emoji""": True,
        },
    }
]
total_num_failed = 0
for log in Path().glob("""*.log"""):
    section_num_failed = 0
    with open(log, """r""") as f:
        for line in f:
            line = json.loads(line)
            if line.get("""nodeid""", """""") != "":
                test = line["""nodeid"""]
                if line.get("""duration""", None) is not None:
                    duration = F'{line["duration"]:.4f}'
                    if line.get("""outcome""", """""") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("""_""")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = """"""
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("""::""")
                data[0] = data[0].split("""/""")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=["""Test Location""", """Num Failed"""],
                tablefmt=hf_table_format,
                stralign="""right""",
            )
            message += F"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 30_00:
        err = """Too many failed tests, please see the full report in the Action results."""
        offset = len(err) + 10
        message = message[: 30_00 - offset] + F'\n...\n```\n{err}'
print(F'### {message}')
else:
    message = """No failed tests! 🤗"""
print(F'## {message}')
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
if message != "No failed tests! 🤗":
        md_report = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
        action_button = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": F'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
payload.append(action_button)
        date_report = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": F'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}',
}
],
}
payload.append(date_report)
        response = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
        ts = response.data["""ts"""]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
                test_class = """"""
                for i, row in enumerate(test_failures):
                    if row[0] != test_class:
                        test_class = row[0]
                    else:
                        row[0] = """"""
                payload = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```',
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
)
| 368 |
'''simple docstring'''
class Node:
    """simple docstring"""
    def __init__( self , val ):
        """simple docstring"""
        self.val = val
        self.left = None
        self.right = None
    def insert( self , val ):
        """simple docstring"""
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val )
                else:
                    self.left.insert(val )
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val )
                else:
                    self.right.insert(val )
        else:
            self.val = val
def inorder( root , res ):
    # Recursive traversal
    if root:
        inorder(root.left , res )
        res.append(root.val )
        inorder(root.right , res )
def tree_sort( arr ):
    # Build BST
    if len(arr ) == 0:
        return arr
    root = Node(arr[0] )
    for i in range(1 , len(arr ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    res = []
    inorder(root , res )
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
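    # Note (sketch): equal keys take neither branch in Node.insert above, so
    # duplicates are dropped; tree_sort de-duplicates while it sorts.
    print(tree_sort([5, 1, 5, 3, 1]))  # -> [1, 3, 5]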
| 4 | 0 |
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)
class ParallelBackendConfig:
    """simple docstring"""
    backend_name = None
@experimental
def parallel_map( function , iterable , num_proc , types , disable_tqdm , desc , single_map_nested_func ):
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function , iterable , num_proc , types , disable_tqdm , desc , single_map_nested_func )
    return _map_with_joblib(function , iterable , num_proc , types , disable_tqdm , desc , single_map_nested_func )
def _map_with_multiprocessing_pool( function , iterable , num_proc , types , disable_tqdm , desc , single_map_nested_func ):
    num_proc = num_proc if num_proc <= len(iterable ) else len(iterable )
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc ):
        div = len(iterable ) // num_proc
        mod = len(iterable ) % num_proc
        start = div * index + min(index , mod )
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
    if len(iterable ) != sum(len(i[1] ) for i in split_kwds ):
        raise ValueError(
            f'Error dividing inputs iterable among processes. '
            f'Total number of objects {len(iterable )}, '
            f'length: {sum(len(i[1] ) for i in split_kwds )}' )
    logger.info(
        f'Spawning {num_proc} processes for {len(iterable )} objects in slices of {[len(i[1] ) for i in split_kwds]}' )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc , initargs=initargs , initializer=initializer ) as pool:
        mapped = pool.map(single_map_nested_func , split_kwds )
    logger.info(f'Finished {num_proc} processes' )
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f'Unpacked {len(mapped )} objects' )
    return mapped
def _map_with_joblib( function , iterable , num_proc , types , disable_tqdm , desc , single_map_nested_func ):
    import joblib
    with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=num_proc ):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def parallel_backend( backend_name : str ):
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark
        register_spark()
    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
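def _square( x ):
    return x * x
def _apply_function_to_shard( args ):
    # Unpacks one (function, shard, types, index, disable_tqdm, desc) tuple
    # produced by the contiguous-split logic above.
    function, shard, _types, _index, _disable_tqdm, _desc = args
    return [function(x ) for x in shard]
if __name__ == "__main__":
    # Usage sketch (assumption: mirrors how datasets feeds a map-over-shards
    # helper through ``single_map_nested_func``).
    result = parallel_map(
        _square , list(range(10 ) ) , num_proc=2 , types=(list,) , disable_tqdm=True , desc='''demo''' , single_map_nested_func=_apply_function_to_shard )
    print(result )  # [0, 1, 4, ..., 81]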
| 369 |
'''simple docstring'''
def heaps( arr : list ) -> list:
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []
    def generate( k : int , arr : list ):
        if k == 1:
            res.append(tuple(arr[:] ) )
            return
        generate(k - 1 , arr )
        for i in range(k - 1 ):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1 , arr )
    generate(len(arr ) , arr )
    return res
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
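    # Deterministic check (sketch): Heap's algorithm emits all n! orderings.
    from itertools import permutations
    assert sorted(heaps([1, 2, 3])) == sorted(permutations([1, 2, 3]))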
| 4 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
rename_keys_prefix = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
ACCEPTABLE_CHECKPOINTS = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def load_state_dict( checkpoint_path ):
    sd = torch.load(checkpoint_path , map_location='''cpu''' )
    return sd
def get_new_dict( d , config , rename_keys_prefix=rename_keys_prefix ):
    new_d = OrderedDict()
    new_d['''visual_bert.embeddings.position_ids'''] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] , name_pair[1] )
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d['''cls.predictions.decoder.bias'''] = new_d['''cls.predictions.bias''']
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint( checkpoint_path , pytorch_dump_folder_path ):
assert (
checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS
), f'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
# Get Config
    if "pre" in checkpoint_path:
        model_type = '''pretraining'''
        if "vcr" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 5_1_2}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2_0_4_8}
        elif "vqa" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2_0_4_8}
        elif "nlvr" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 1_0_2_4}
        else:
            raise NotImplementedError(f'No implementation found for `{checkpoint_path}`.' )
    else:
        if "vcr" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 5_1_2}
            model_type = '''multichoice'''
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2_0_4_8}
            model_type = '''vqa_advanced'''
        elif "vqa" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2_0_4_8, '''num_labels''': 3_1_2_9}
            model_type = '''vqa'''
        elif "nlvr" in checkpoint_path:
            config_params = {
                '''visual_embedding_dim''': 1_0_2_4,
                '''num_labels''': 2,
            }
            model_type = '''nlvr'''
    config = VisualBertConfig(**config_params )
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path )
    new_state_dict = get_new_dict(state_dict , config )
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config )
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config )
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config )
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config )
    model.load_state_dict(new_state_dict )
    # Save Checkpoints
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
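    # Example invocation (sketch; the checkpoint must be one of ACCEPTABLE_CHECKPOINTS):
    #   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
    #       vqa_fine_tuned.th ./visualbert-vqa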
| 370 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
    def test_text_streamer_matches_non_streaming( self ):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        model = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer )
            model.generate(input_ids , max_new_tokens=10 , do_sample=False , streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text , greedy_text )
    def test_iterator_streamer_matches_non_streaming( self ):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        model = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )
        streamer = TextIteratorStreamer(tokenizer )
        generation_kwargs = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
        thread = Thread(target=model.generate , kwargs=generation_kwargs )
        thread.start()
        streamer_text = ''''''
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text , greedy_text )
    def test_text_streamer_skip_prompt( self ):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        model = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0] )
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer , skip_prompt=True )
            model.generate(input_ids , max_new_tokens=10 , do_sample=False , streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text , new_greedy_text )
    def test_text_streamer_decode_kwargs( self ):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('''distilgpt2''' )
        model = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = torch.ones((1, 5) , device=torch_device ).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer , skip_special_tokens=True )
            model.generate(input_ids , max_new_tokens=1 , do_sample=False , streamer=streamer )
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text , return_tensors='''pt''' )
        self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
    def test_iterator_streamer_timeout( self ):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        model = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        streamer = TextIteratorStreamer(tokenizer , timeout=0.0_01 )
        generation_kwargs = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
        thread = Thread(target=model.generate , kwargs=generation_kwargs )
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty ):
            streamer_text = ''''''
            for new_text in streamer:
                streamer_text += new_text
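# Minimal TextIteratorStreamer pattern (sketch) distilled from the tests above:
#   streamer = TextIteratorStreamer(tokenizer)
#   Thread(target=model.generate, kwargs={"input_ids": ids, "streamer": streamer}).start()
#   text = "".join(streamer)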
| 4 | 0 |
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def _gelu( lowerCamelCase__ ):
    x = tf.convert_to_tensor(lowerCamelCase__ )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
def _gelu_new( lowerCamelCase__ ):
    x = tf.convert_to_tensor(lowerCamelCase__ )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.044715 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf
def mish( lowerCamelCase__ ):
    x = tf.convert_to_tensor(lowerCamelCase__ )
    return x * tf.tanh(tf.math.softplus(x ) )
def gelu_fast( lowerCamelCase__ ):
    x = tf.convert_to_tensor(lowerCamelCase__ )
    coeff1 = tf.cast(0.044715 , x.dtype )
    coeff2 = tf.cast(0.7978845608 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x) ))
def quick_gelu( lowerCamelCase__ ):
    x = tf.convert_to_tensor(lowerCamelCase__ )
    coeff = tf.cast(1.702 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def gelu_aa( lowerCamelCase__ ):
    return tf.clip_by_value(_gelu(lowerCamelCase__ ) , -1_0 , 1_0 )
def glu( lowerCamelCase__ , axis=-1 ):
    a, b = tf.split(lowerCamelCase__ , 2 , axis=axis )
    return a * tf.math.sigmoid(b )
if version.parse(tf.version.VERSION) >= version.parse("""2.4"""):
    def approximate_gelu_wrap( lowerCamelCase__ ):
        return tf.keras.activations.gelu(lowerCamelCase__ , approximate=True )
    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
ACTaFN = {
"gelu": gelu,
"gelu_10": gelu_aa,
"gelu_fast": gelu_fast,
"gelu_new": gelu_new,
"glu": glu,
"mish": mish,
"quick_gelu": quick_gelu,
"relu": tf.keras.activations.relu,
"sigmoid": tf.keras.activations.sigmoid,
"silu": tf.keras.activations.swish,
"swish": tf.keras.activations.swish,
"tanh": tf.keras.activations.tanh,
}
def get_tf_activation( activation_string ):
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(f'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
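# Quick numeric sanity check (sketch): every gelu flavour maps 0.0 to 0.0.
for _name in ("""gelu""", """gelu_new""", """gelu_fast""", """quick_gelu"""):
    _zero = get_tf_activation(_name )(tf.constant([0.0] ) )
    assert abs(float(_zero[0] )) < 1E-6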
| 371 |
'''simple docstring'''
import heapq
def greedy_min_vertex_cover( graph : dict ) -> set[int]:
    queue : list[list] = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue , [-1 * len(value ), (key, value)] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if the vertex has no adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
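    # Expected cover for this example graph (sketch): {0, 1, 2, 4}
    assert greedy_min_vertex_cover(graph ) == {0, 1, 2, 4}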
| 4 | 0 |
'''simple docstring'''
def solution( limit : int = 2_8_1_2_3 ) -> int:
    sum_divs = [1] * (limit + 1)
    for i in range(2 , int(limit**0.5 ) + 1 ):
        sum_divs[i * i] += i
        for k in range(i + 1 , limit // i + 1 ):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1 , limit + 1 ):
        if sum_divs[n] > n:
            abundants.add(n )
        if not any((n - a in abundants) for a in abundants ):
            res += n
    return res
if __name__ == "__main__":
print(solution())
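    # Reference check (sketch): for the default limit this prints 4179871,
    # the known Project Euler 23 answer.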
| 350 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key( state_dict , old , new ):
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v( state_dict ):
    prefix = ''''''
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:2_5_6, :]
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:2_5_6]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[2_5_6:5_1_2, :]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[2_5_6:5_1_2]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-2_5_6:, :]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-2_5_6:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:2_5_6, :]
        state_dict[f'decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:2_5_6]
        state_dict[f'decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[2_5_6:5_1_2, :]
        state_dict[f'decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[2_5_6:5_1_2]
        state_dict[f'decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-2_5_6:, :]
        state_dict[f'decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-2_5_6:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
        in_proj_bias_cross_attn = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f'decoder.layers.{i}.encoder_attn.q_proj.weight'] = in_proj_weight_cross_attn[:2_5_6, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.q_proj.bias'] = in_proj_bias_cross_attn[:2_5_6]
        state_dict[f'decoder.layers.{i}.encoder_attn.k_proj.weight'] = in_proj_weight_cross_attn[2_5_6:5_1_2, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.k_proj.bias'] = in_proj_bias_cross_attn[2_5_6:5_1_2]
        state_dict[f'decoder.layers.{i}.encoder_attn.v_proj.weight'] = in_proj_weight_cross_attn[-2_5_6:, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.v_proj.bias'] = in_proj_bias_cross_attn[-2_5_6:]
def resize( image , checkpoint_url ):
    width, height = image.size
    current_max_size = max(width , height )
    target_max_size = 8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
    return resized_image
def normalize( image ):
    image = F.to_tensor(image )
    image = F.normalize(image , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub ):
    logger.info('''Converting model...''' )
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    state_dict = rename_backbone_keys(state_dict )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = '''model.'''
    for key in state_dict.copy().keys():
        if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
            val = state_dict.pop(key )
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
    if "detection" in checkpoint_url:
        config.num_queries = 1_5
        config.num_labels = 2
        id2label = {0: '''table''', 1: '''table rotated'''}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 1_2_5
        config.num_labels = 6
        id2label = {
            0: '''table''',
            1: '''table column''',
            2: '''table row''',
            3: '''table column header''',
            4: '''table projected row header''',
            5: '''table spanning cell''',
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format='''coco_detection''' , max_size=8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0 )
    model = TableTransformerForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    # verify our conversion
    filename = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
    file_path = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=filename )
    image = Image.open(file_path ).convert('''RGB''' )
    pixel_values = normalize(resize(image , checkpoint_url ) ).unsqueeze(0 )
    outputs = model(pixel_values )
    if "detection" in checkpoint_url:
        expected_shape = (1, 1_5, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
    else:
        expected_shape = (1, 1_2_5, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes , atol=1e-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model to HF hub
        logger.info('''Pushing model to the hub...''' )
        model_name = (
            '''microsoft/table-transformer-detection'''
            if '''detection''' in checkpoint_url
            else '''microsoft/table-transformer-structure-recognition'''
        )
        model.push_to_hub(model_name )
        image_processor.push_to_hub(model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
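    # Example invocation (sketch):
    #   python convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py \
    #       --pytorch_dump_folder_path ./table-transformer-detection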
| 4 | 0 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
'examples': (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R'\1version="VERSION",'),
'doc': (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
'init': 'src/transformers/__init__.py',
'setup': 'setup.py',
}
README_FILE = 'README.md'
def snake_case__ ( lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Union[str, Any] ) -> List[str]:
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
A_ : Tuple = f.read()
A_ : str = REPLACE_PATTERNS[pattern]
A_ : List[str] = replace.replace('''VERSION''' , UpperCamelCase__ )
A_ : str = re_pattern.sub(UpperCamelCase__ , UpperCamelCase__ )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(UpperCamelCase__ )
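

# A small in-memory sketch of how the (pattern, template) pairs in REPLACE_PATTERNS
# are applied: the literal "VERSION" placeholder in the template is substituted
# first, then the compiled regex rewrites the matching line. This mirrors
# update_version_in_file without touching the filesystem.
def _demo_version_rewrite() -> None:
    import re

    pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
    template = '__version__ = "VERSION"\n'
    source = 'from .utils import logging\n__version__ = "4.28.0.dev0"\n'
    replacement = template.replace("VERSION", "4.28.0")
    assert '__version__ = "4.28.0"' in pattern.sub(replacement, source)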
def snake_case__ ( lowerCamelCase__ : List[Any] ) -> List[str]:
for folder, directories, fnames in os.walk(UpperCamelCase__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ , pattern='''examples''' )
def snake_case__ ( lowerCamelCase__ : Optional[int] , lowerCamelCase__ : str=False ) -> Union[str, Any]:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not patch:
update_version_in_examples(UpperCamelCase__ )
def snake_case__ ( ) -> Optional[Any]:
    A_ : str = '''🤗 Transformers currently provides the following architectures'''
A_ : Optional[int] = '''1. Want to contribute a new model?'''
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
A_ : str = f.readlines()
# Find the start of the list.
A_ : Optional[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
A_ : Union[str, Any] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
A_ : str = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(UpperCamelCase__ )
def snake_case__ ( ) -> Tuple:
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
A_ : Optional[int] = f.read()
A_ : List[Any] = REPLACE_PATTERNS['''init'''][0].search(UpperCamelCase__ ).groups()[0]
return packaging.version.parse(UpperCamelCase__ )
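

# A quick sketch of the packaging.version fields the release helpers below rely on:
# is_devrelease distinguishes "4.29.0.dev0" from a final release, base_version strips
# the dev suffix, and major/minor/micro drive the default bump arithmetic.
def _demo_version_fields() -> None:
    import packaging.version

    v = packaging.version.parse("4.29.0.dev0")
    assert v.is_devrelease
    assert v.base_version == "4.29.0"
    assert f"{v.major}.{v.minor + 1}.0" == "4.30.0"  # next default minor bump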
def snake_case__ ( lowerCamelCase__ : Any=False ) -> int:
A_ : str = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
A_ : str = default_version.base_version
elif patch:
A_ : List[str] = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
A_ : Optional[Any] = f'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
A_ : Optional[Any] = input(f'Which version are you releasing? [{default_version}]' )
if len(UpperCamelCase__ ) == 0:
A_ : List[str] = default_version
print(f'Updating version to {version}.' )
global_version_update(UpperCamelCase__ , patch=UpperCamelCase__ )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def snake_case__ ( ) -> Union[str, Any]:
A_ : str = get_version()
A_ : List[Any] = f'{current_version.major}.{current_version.minor + 1}.0.dev0'
A_ : Any = current_version.base_version
# Check with the user we got that right.
A_ : List[str] = input(f'Which version are we developing now? [{dev_version}]' )
if len(UpperCamelCase__ ) == 0:
A_ : Optional[Any] = dev_version
print(f'Updating version to {version}.' )
global_version_update(UpperCamelCase__ )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
snake_case__ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
| 351 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
snake_case__ = logging.getLogger(__name__)
@dataclass(frozen=a__ )
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
@dataclass(frozen=a__ )
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 42
def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : List[Any]=False , _lowerCamelCase : bool = False , ):
"""simple docstring"""
A_ : Optional[int] = hans_processors[task]()
A_ : int = os.path.join(
_lowerCamelCase , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(_lowerCamelCase ) , _lowerCamelCase , ) , )
A_ : Dict = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A_ ,A_ : List[str] = label_list[2], label_list[1]
A_ : Optional[int] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A_ : str = cached_features_file + '''.lock'''
with FileLock(_lowerCamelCase ):
if os.path.exists(_lowerCamelCase ) and not overwrite_cache:
logger.info(f'Loading features from cached file {cached_features_file}' )
A_ : List[str] = torch.load(_lowerCamelCase )
else:
logger.info(f'Creating features from dataset file at {data_dir}' )
A_ : Optional[int] = (
processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase )
)
logger.info('''Training examples: %s''' , len(_lowerCamelCase ) )
A_ : Optional[int] = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
logger.info('''Saving features into cached file %s''' , _lowerCamelCase )
torch.save(self.features , _lowerCamelCase )
def __len__( self : List[str] ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : List[str] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
return self.features[i]
def _a ( self : str ):
"""simple docstring"""
return self.label_list
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = 128 , _lowerCamelCase : Dict=False , _lowerCamelCase : bool = False , ):
"""simple docstring"""
A_ : Optional[int] = hans_processors[task]()
A_ : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A_ ,A_ : Union[str, Any] = label_list[2], label_list[1]
A_ : Tuple = label_list
A_ : Optional[int] = processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase )
A_ : Tuple = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 10000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(_lowerCamelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
A_ : List[Any] = tf.data.Dataset.from_generator(
_lowerCamelCase , (
{
                    '''example_id''': tf.int64,
                    '''input_ids''': tf.int64,
                    '''attention_mask''': tf.int64,
                    '''token_type_ids''': tf.int64,
                },
                tf.int64,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _a ( self : Any ):
"""simple docstring"""
return self.dataset
def __len__( self : Dict ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : Optional[int] , _lowerCamelCase : List[str] ):
"""simple docstring"""
return self.features[i]
def _a ( self : Tuple ):
"""simple docstring"""
return self.label_list
class UpperCamelCase_ (a__ ):
"""simple docstring"""
def _a ( self : List[str] , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_train_set.txt''' ) ) , '''train''' )
def _a ( self : List[str] , _lowerCamelCase : Tuple ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def _a ( self : Any ):
"""simple docstring"""
return ["contradiction", "entailment", "neutral"]
def _a ( self : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any ):
"""simple docstring"""
A_ : Tuple = []
for i, line in enumerate(_lowerCamelCase ):
if i == 0:
continue
A_ : str = '''%s-%s''' % (set_type, line[0])
A_ : Optional[Any] = line[5]
A_ : Union[str, Any] = line[6]
A_ : List[str] = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
A_ : str = line[0]
examples.append(InputExample(guid=_lowerCamelCase , text_a=_lowerCamelCase , text_b=_lowerCamelCase , label=_lowerCamelCase , pairID=_lowerCamelCase ) )
return examples
def snake_case__ ( lowerCamelCase__ : List[InputExample] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : PreTrainedTokenizer , ) -> int:
A_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase__ )}
A_ : Optional[Any] = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase__ ) , desc='''convert examples to features''' ):
if ex_index % 1_0_0_0_0 == 0:
logger.info('''Writing example %d''' % (ex_index) )
A_ : Optional[int] = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowerCamelCase__ , max_length=lowerCamelCase__ , padding='''max_length''' , truncation=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , )
A_ : List[str] = label_map[example.label] if example.label in label_map else 0
A_ : Tuple = int(example.pairID )
features.append(InputFeatures(**lowerCamelCase__ , label=lowerCamelCase__ , pairID=lowerCamelCase__ ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f'guid: {example}' )
logger.info(f'features: {features[i]}' )
return features
snake_case__ = {
"""hans""": 3,
}
snake_case__ = {
"""hans""": HansProcessor,
}
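

# A short sketch of the label-index swap applied above for RoBERTa-style checkpoints.
# The upstream comment only says the indices are swapped in the pretrained model; the
# mechanics are an in-place exchange of positions 1 and 2 of the default label list
# before the label -> id map is built. Toy data, for illustration only.
def _demo_label_swap() -> None:
    label_list = ["contradiction", "entailment", "neutral"]
    label_list[2], label_list[1] = label_list[1], label_list[2]
    label_map = {label: i for i, label in enumerate(label_list)}
    assert label_map == {"contradiction": 0, "neutral": 1, "entailment": 2}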
| 4 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class UpperCamelCase_ (_lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'imagegpt'
_lowerCAmelCase = ['past_key_values']
_lowerCAmelCase = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Any , _lowerCamelCase : Optional[Any]=512 + 1 , _lowerCamelCase : Optional[int]=32 * 32 , _lowerCamelCase : int=512 , _lowerCamelCase : int=24 , _lowerCamelCase : List[str]=8 , _lowerCamelCase : List[str]=None , _lowerCamelCase : Tuple="quick_gelu" , _lowerCamelCase : Optional[Any]=0.1 , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : Tuple=1E-5 , _lowerCamelCase : str=0.02 , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : int=False , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : Dict=False , **_lowerCamelCase : Optional[int] , ):
"""simple docstring"""
A_ : str = vocab_size
A_ : Union[str, Any] = n_positions
A_ : Tuple = n_embd
A_ : Tuple = n_layer
A_ : List[Any] = n_head
A_ : Any = n_inner
A_ : Any = activation_function
A_ : Dict = resid_pdrop
A_ : List[Any] = embd_pdrop
A_ : List[Any] = attn_pdrop
A_ : Optional[int] = layer_norm_epsilon
A_ : Optional[int] = initializer_range
A_ : Dict = scale_attn_weights
A_ : Tuple = use_cache
A_ : Tuple = scale_attn_by_inverse_layer_idx
A_ : Optional[int] = reorder_and_upcast_attn
A_ : str = tie_word_embeddings
super().__init__(tie_word_embeddings=_lowerCamelCase , **_lowerCamelCase )
class UpperCamelCase_ (_lowerCamelCase ):
"""simple docstring"""
@property
def _a ( self : Tuple ):
"""simple docstring"""
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def _a ( self : Optional[Any] , _lowerCamelCase : "FeatureExtractionMixin" , _lowerCamelCase : int = 1 , _lowerCamelCase : int = -1 , _lowerCamelCase : bool = False , _lowerCamelCase : Optional["TensorType"] = None , _lowerCamelCase : int = 3 , _lowerCamelCase : int = 32 , _lowerCamelCase : int = 32 , ):
"""simple docstring"""
A_ : Dict = self._generate_dummy_images(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : int = dict(preprocessor(images=_lowerCamelCase , return_tensors=_lowerCamelCase ) )
return inputs
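

# A minimal sketch of what the attribute_map above buys us, assuming the upstream
# ImageGPTConfig from transformers (the class names in this file are obfuscated
# placeholders): generic names such as hidden_size transparently resolve to the
# GPT-style fields n_embd / n_layer.
def _demo_attribute_map() -> None:
    from transformers import ImageGPTConfig

    config = ImageGPTConfig()
    assert config.hidden_size == config.n_embd
    assert config.num_hidden_layers == config.n_layer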
| 352 |
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
snake_case__ = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCamelCase_ (datasets.BuilderConfig ):
"""simple docstring"""
_lowerCAmelCase = None
_lowerCAmelCase = "utf-8"
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = True # deprecated
_lowerCAmelCase = None # deprecated
_lowerCAmelCase = 1_0 << 2_0 # 10MB
_lowerCAmelCase = None
class UpperCamelCase_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
_lowerCAmelCase = JsonConfig
def _a ( self : int ):
"""simple docstring"""
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
A_ : List[Any] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
def _a ( self : Any , _lowerCamelCase : List[str] ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
A_ : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_lowerCamelCase , (str, list, tuple) ):
A_ : Union[str, Any] = data_files
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : List[str] = [files]
A_ : List[Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
A_ : Tuple = []
for split_name, files in data_files.items():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : int = [files]
A_ : Union[str, Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={'''files''': files} ) )
return splits
def _a ( self : int , _lowerCamelCase : pa.Table ):
"""simple docstring"""
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
A_ : Optional[int] = self.config.features.arrow_schema.field(_lowerCamelCase ).type
A_ : Optional[int] = pa_table.append_column(_lowerCamelCase , pa.array([None] * len(_lowerCamelCase ) , type=_lowerCamelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
A_ : str = table_cast(_lowerCamelCase , self.config.features.arrow_schema )
return pa_table
def _a ( self : List[str] , _lowerCamelCase : int ):
"""simple docstring"""
for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A_ : int = json.load(_lowerCamelCase )
# We keep only the field we are interested in
A_ : List[str] = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(_lowerCamelCase , (list, tuple) ):
A_ : int = set().union(*[row.keys() for row in dataset] )
A_ : List[str] = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys}
else:
A_ : Tuple = dataset
A_ : Dict = pa.Table.from_pydict(_lowerCamelCase )
yield file_idx, self._cast_table(_lowerCamelCase )
# If the file has one json object per line
else:
with open(_lowerCamelCase , '''rb''' ) as f:
A_ : int = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
A_ : int = max(self.config.chunksize // 32 , 16 << 10 )
A_ : int = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
A_ : Any = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(_lowerCamelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
A_ : Optional[Any] = batch.decode(self.config.encoding , errors=_lowerCamelCase ).encode('''utf-8''' )
try:
while True:
try:
A_ : List[Any] = paj.read_json(
io.BytesIO(_lowerCamelCase ) , read_options=paj.ReadOptions(block_size=_lowerCamelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(_lowerCamelCase , pa.ArrowInvalid )
and "straddling" not in str(_lowerCamelCase )
or block_size > len(_lowerCamelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f'Batch of {len(_lowerCamelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A_ : Optional[Any] = json.load(_lowerCamelCase )
except json.JSONDecodeError:
logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(_lowerCamelCase , _lowerCamelCase ): # list is the only sequence type supported in JSON
try:
A_ : Optional[int] = set().union(*[row.keys() for row in dataset] )
A_ : Tuple = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys}
A_ : int = pa.Table.from_pydict(_lowerCamelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' )
raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None
yield file_idx, self._cast_table(_lowerCamelCase )
break
else:
logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' )
raise ValueError(
f'Not able to read records in the JSON file at {file}. '
f'You should probably indicate the field of the JSON file containing your records. '
f'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '
f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_lowerCamelCase )
batch_idx += 1
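

# A standalone sketch of the block-size retry loop above: pyarrow's JSON reader
# raises when a record straddles a block boundary, so the read is retried with a
# doubled block size until the chunk parses or the block covers the whole buffer.
# Illustrative only; encoding and field handling are trimmed.
def _read_json_with_growing_blocks(raw: bytes, block_size: int = 16 << 10):
    import io

    import pyarrow as pa
    import pyarrow.json as paj

    while True:
        try:
            return paj.read_json(io.BytesIO(raw), read_options=paj.ReadOptions(block_size=block_size))
        except pa.ArrowInvalid:
            if block_size > len(raw):
                raise
            block_size *= 2  # a record straddled the boundary; retry with a bigger block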
| 4 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = '''▁'''
snake_case__ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
snake_case__ = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'''
),
}
}
snake_case__ = {
'''facebook/nllb-200-distilled-600M''': 10_24,
}
# fmt: off
snake_case__ = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class UpperCamelCase_ (lowerCamelCase__ ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = ['input_ids', 'attention_mask']
_lowerCAmelCase = []
_lowerCAmelCase = []
def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : int="<s>" , _lowerCamelCase : List[str]="</s>" , _lowerCamelCase : Optional[Any]="</s>" , _lowerCamelCase : Optional[int]="<s>" , _lowerCamelCase : Any="<unk>" , _lowerCamelCase : Tuple="<pad>" , _lowerCamelCase : Tuple="<mask>" , _lowerCamelCase : Dict=None , _lowerCamelCase : int=None , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Optional[Dict[str, Any]] = None , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Union[str, Any]=False , **_lowerCamelCase : Any , ):
"""simple docstring"""
        A_ : int = AddedToken(_lowerCamelCase , lstrip=True , rstrip=False ) if isinstance(_lowerCamelCase , str ) else mask_token
A_ : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
A_ : Any = legacy_behaviour
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , tokenizer_file=_lowerCamelCase , src_lang=_lowerCamelCase , tgt_lang=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_lowerCamelCase , **_lowerCamelCase , )
A_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCamelCase ) )
A_ : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
A_ : Union[str, Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
A_ : Dict = 1
A_ : Dict = len(self.sp_model )
A_ : int = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_lowerCamelCase )
}
A_ : Optional[Any] = {v: k for k, v in self.lang_code_to_id.items()}
A_ : Union[str, Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
A_ : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
A_ : Union[str, Any] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
A_ : Dict = src_lang if src_lang is not None else "eng_Latn"
A_ : List[str] = self.lang_code_to_id[self._src_lang]
A_ : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : List[Any] ):
"""simple docstring"""
A_ : Optional[Any] = self.__dict__.copy()
A_ : Union[str, Any] = None
A_ : Optional[int] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : str , _lowerCamelCase : List[str] ):
"""simple docstring"""
A_ : Tuple = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
A_ : Tuple = {}
A_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _a ( self : Optional[int] ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _a ( self : int ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _a ( self : Dict , _lowerCamelCase : str ):
"""simple docstring"""
A_ : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _a ( self : Dict , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
A_ : int = [1] * len(self.prefix_tokens )
A_ : Any = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones
def _a ( self : List[str] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _a ( self : Optional[int] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
"""simple docstring"""
A_ : str = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self : Dict , _lowerCamelCase : Tuple , _lowerCamelCase : str , _lowerCamelCase : Optional[str] , _lowerCamelCase : Optional[str] , **_lowerCamelCase : Any ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
A_ : str = src_lang
A_ : str = self(_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase )
A_ : Any = self.convert_tokens_to_ids(_lowerCamelCase )
A_ : Optional[int] = tgt_lang_id
return inputs
def _a ( self : List[str] ):
"""simple docstring"""
        A_ : Union[str, Any] = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _a ( self : Optional[int] , _lowerCamelCase : str ):
"""simple docstring"""
        return self.sp_model.encode(_lowerCamelCase , out_type=str )
def _a ( self : List[str] , _lowerCamelCase : List[Any] ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
A_ : Dict = self.sp_model.PieceToId(_lowerCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _a ( self : Union[str, Any] , _lowerCamelCase : Dict ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _a ( self : Optional[Any] , _lowerCamelCase : Dict ):
"""simple docstring"""
        A_ : str = "".join(_lowerCamelCase ).replace('''▁''' , ''' ''' ).strip()
return out_string
def _a ( self : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_lowerCamelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : List[str] = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , '''wb''' ) as fi:
A_ : List[str] = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
def _a ( self : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : str = "eng_Latn" , _lowerCamelCase : Optional[List[str]] = None , _lowerCamelCase : str = "fra_Latn" , **_lowerCamelCase : Tuple , ):
"""simple docstring"""
A_ : List[Any] = src_lang
A_ : List[str] = tgt_lang
return super().prepare_seqaseq_batch(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _a ( self : Optional[Any] ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _a ( self : Optional[int] , _lowerCamelCase : List[str] ):
"""simple docstring"""
A_ : List[str] = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
A_ : str = []
A_ : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
else:
A_ : Any = [self.cur_lang_code]
A_ : str = [self.eos_token_id]
def _a ( self : Optional[Any] , _lowerCamelCase : str ):
"""simple docstring"""
A_ : str = self.lang_code_to_id[lang]
if self.legacy_behaviour:
A_ : Optional[int] = []
A_ : Optional[int] = [self.eos_token_id, self.cur_lang_code]
else:
A_ : List[Any] = [self.cur_lang_code]
A_ : int = [self.eos_token_id]
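

# A hedged usage sketch for the tokenizer above, assuming the published
# facebook/nllb-200-distilled-600M checkpoint: src_lang/tgt_lang select the language
# codes that set_src_lang_special_tokens / set_tgt_lang_special_tokens splice around
# the sentencepiece ids, and text_target tokenizes labels under the target language.
def _demo_nllb_tokenizer() -> None:
    from transformers import NllbTokenizer

    tok = NllbTokenizer.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    batch = tok("Hello world", text_target="Bonjour le monde", return_tensors="pt")
    print(tok.convert_ids_to_tokens(batch["input_ids"][0]))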
| 353 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class UpperCamelCase_ (a__, a__ ):
"""simple docstring"""
_lowerCAmelCase = 'swin'
_lowerCAmelCase = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Any , _lowerCamelCase : Optional[Any]=224 , _lowerCamelCase : List[str]=4 , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Tuple=96 , _lowerCamelCase : List[Any]=[2, 2, 6, 2] , _lowerCamelCase : List[str]=[3, 6, 12, 24] , _lowerCamelCase : List[Any]=7 , _lowerCamelCase : Optional[int]=4.0 , _lowerCamelCase : List[str]=True , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Any=0.0 , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : Tuple=False , _lowerCamelCase : Dict=0.02 , _lowerCamelCase : Optional[Any]=1E-5 , _lowerCamelCase : Any=32 , _lowerCamelCase : Tuple=None , _lowerCamelCase : Any=None , **_lowerCamelCase : str , ):
"""simple docstring"""
super().__init__(**_lowerCamelCase )
A_ : Optional[int] = image_size
A_ : Optional[int] = patch_size
A_ : Optional[int] = num_channels
A_ : Any = embed_dim
A_ : List[Any] = depths
A_ : Any = len(_lowerCamelCase )
A_ : List[Any] = num_heads
A_ : Tuple = window_size
A_ : Tuple = mlp_ratio
A_ : Dict = qkv_bias
A_ : List[str] = hidden_dropout_prob
A_ : List[str] = attention_probs_dropout_prob
A_ : Any = drop_path_rate
A_ : List[Any] = hidden_act
A_ : Tuple = use_absolute_embeddings
A_ : int = layer_norm_eps
A_ : Optional[Any] = initializer_range
A_ : Union[str, Any] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A_ : str = int(embed_dim * 2 ** (len(_lowerCamelCase ) - 1) )
A_ : str = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(_lowerCamelCase ) + 1 )]
A_ ,A_ : Optional[Any] = get_aligned_output_features_output_indices(
out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names )
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = version.parse('1.11' )
@property
def _a ( self : str ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return 1E-4
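

# A quick sketch of the derived fields above, assuming the upstream SwinConfig:
# hidden_size is the channel width after the final stage, embed_dim * 2**(len(depths) - 1),
# and stage_names always begins with "stem".
def _demo_swin_derived_fields() -> None:
    from transformers import SwinConfig

    config = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
    assert config.hidden_size == 96 * 2 ** (len(config.depths) - 1)  # 768
    assert config.stage_names[0] == "stem"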
| 4 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase_ (a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase : Any = LxmertTokenizer
_lowerCAmelCase : Tuple = LxmertTokenizerFast
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : str = True
def _a ( self : List[Any] ):
"""simple docstring"""
super().setUp()
A_ : List[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
A_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def _a ( self : int , _lowerCamelCase : int ):
"""simple docstring"""
A_ : Tuple = '''UNwant\u00E9d,running'''
A_ : Dict = '''unwanted, running'''
return input_text, output_text
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : int = self.tokenizer_class(self.vocab_file )
A_ : Tuple = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(lowerCamelCase_ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [7, 4, 5, 10, 8, 9] )
def _a ( self : str ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
A_ : Tuple = self.get_tokenizer()
A_ : Tuple = self.get_rust_tokenizer()
A_ : List[Any] = '''I was born in 92000, and this is falsé.'''
A_ : int = tokenizer.tokenize(lowerCamelCase_ )
A_ : Dict = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
A_ : str = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
A_ : Any = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
A_ : List[str] = self.get_rust_tokenizer()
A_ : str = tokenizer.encode(lowerCamelCase_ )
A_ : Optional[int] = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
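

# A compact sketch of the WordPiece behaviour the test above exercises, reusing the
# same toy vocabulary: lowercasing plus accent stripping maps "UNwant\u00E9d" onto
# known subwords, which chain with the "##" continuation marker.
def _demo_wordpiece() -> None:
    import os
    import tempfile

    from transformers import BertTokenizer

    vocab = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ","]
    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, "vocab.txt")
        with open(path, "w", encoding="utf-8") as f:
            f.write("\n".join(vocab) + "\n")
        tokenizer = BertTokenizer(path)
        assert tokenizer.tokenize("UNwant\u00E9d,running") == ["un", "##want", "##ed", ",", "runn", "##ing"]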
| 354 |
'''simple docstring'''
from __future__ import annotations
def snake_case__ ( lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> list[int]:
A_ : int = 0
A_ : str = len(lowerCamelCase__ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
A_ : Tuple = i + 1
else:
A_ : List[str] = j - 1
return []
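

# For contrast, a hash-map variant of the same two-sum problem: one pass, O(n) time,
# and no sorted-input requirement (the two-pointer version above assumes the list
# is sorted in ascending order).
def two_sum_hashed(nums: list[int], target: int) -> list[int]:
    seen: dict[int, int] = {}
    for j, value in enumerate(nums):
        if target - value in seen:
            return [seen[target - value], j]
        seen[value] = j
    return []


assert two_sum_hashed([2, 7, 11, 15], 9) == [0, 1]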
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{two_pointer([2, 7, 11, 15], 9) = }')
| 4 | 0 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase_ :
"""simple docstring"""
@staticmethod
def _a ( *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : Optional[int] ):
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def _a ( self : List[str] , _lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : List[Any] ):
"""simple docstring"""
A_ : List[str] = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
A_ : Optional[Any] = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def _a ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] ):
"""simple docstring"""
A_ : str = vqa_pipeline(lowercase_ , top_k=1 )
self.assertEqual(
lowercase_ , [
[{'''score''': ANY(lowercase_ ), '''answer''': ANY(lowercase_ )}],
[{'''score''': ANY(lowercase_ ), '''answer''': ANY(lowercase_ )}],
] , )
@require_torch
def _a ( self : Dict ):
"""simple docstring"""
A_ : str = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
A_ : Optional[int] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
A_ : int = '''How many cats are there?'''
A_ : Tuple = vqa_pipeline(image=lowercase_ , question='''How many cats are there?''' , top_k=2 )
self.assertEqual(
lowercase_ , [{'''score''': ANY(lowercase_ ), '''answer''': ANY(lowercase_ )}, {'''score''': ANY(lowercase_ ), '''answer''': ANY(lowercase_ )}] )
A_ : List[str] = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
lowercase_ , [{'''score''': ANY(lowercase_ ), '''answer''': ANY(lowercase_ )}, {'''score''': ANY(lowercase_ ), '''answer''': ANY(lowercase_ )}] )
@slow
@require_torch
def _a ( self : Dict ):
"""simple docstring"""
A_ : str = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''' )
A_ : List[str] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
A_ : Dict = '''How many cats are there?'''
A_ : int = vqa_pipeline(image=lowercase_ , question=lowercase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [{'''score''': 0.87_99, '''answer''': '''2'''}, {'''score''': 0.2_96, '''answer''': '''1'''}] )
A_ : List[Any] = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [{'''score''': 0.87_99, '''answer''': '''2'''}, {'''score''': 0.2_96, '''answer''': '''1'''}] )
A_ : Dict = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [[{'''score''': 0.87_99, '''answer''': '''2'''}, {'''score''': 0.2_96, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''' )
def _a ( self : Optional[Any] ):
"""simple docstring"""
pass
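

# A hedged, minimal inference sketch matching what the slow test above checks,
# assuming the public dandelin/vilt-b32-finetuned-vqa checkpoint is reachable.
def _demo_vqa_inference() -> None:
    from transformers import pipeline

    vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
    preds = vqa(
        image="./tests/fixtures/tests_samples/COCO/000000039769.png",
        question="How many cats are there?",
        top_k=2,
    )
    print(preds)  # e.g. [{'score': 0.88, 'answer': '2'}, {'score': 0.30, 'answer': '1'}]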
| 355 |
'''simple docstring'''
def snake_case__ ( lowerCamelCase__ : list[int] , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool:
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(lowerCamelCase__ ) )
def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool:
# Base Case
if index == len(lowerCamelCase__ ):
return True
# Recursive Step
for i in range(lowerCamelCase__ ):
if valid_coloring(graph[index] , lowerCamelCase__ , lowerCamelCase__ ):
# Color current vertex
A_ : int = i
# Validate coloring
if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , index + 1 ):
return True
# Backtrack
A_ : str = -1
return False
def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int ) -> list[int]:
A_ : List[str] = [-1] * len(lowerCamelCase__ )
if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , 0 ):
return colored_vertices
return []
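

# A small driver for the backtracking colorer above, written against the
# un-obfuscated upstream signature color(graph, max_colors) and taken as a
# parameter so the sketch stays self-contained. The adjacency matrix is a
# 4-cycle with a pendant vertex; trying colors in ascending order yields
# [0, 1, 0, 1, 0].
def _demo_graph_coloring(color_fn) -> None:
    graph = [
        [0, 1, 0, 0, 0],
        [1, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
        [0, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
    ]
    assert color_fn(graph, 3) == [0, 1, 0, 1, 0]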
| 4 | 0 |
'''simple docstring'''
from __future__ import annotations
import time
snake_case__ = list[tuple[int, int]]
snake_case__ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
snake_case__ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : int , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Dict , _lowerCamelCase : List[Any] , _lowerCamelCase : List[str] ):
"""simple docstring"""
A_ : str = pos_x
A_ : int = pos_y
A_ : Dict = (pos_y, pos_x)
A_ : Optional[Any] = goal_x
A_ : Optional[int] = goal_y
A_ : Any = parent
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : str ):
"""simple docstring"""
        A_ : Dict = Node(start[1] , start[0] , goal[1] , goal[0] , None )
        A_ : List[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , None )
A_ : Optional[Any] = [self.start]
A_ : Union[str, Any] = False
def _a ( self : str ):
"""simple docstring"""
while self.node_queue:
A_ : Optional[int] = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
A_ : Optional[int] = True
return self.retrace_path(lowerCamelCase__ )
A_ : Any = self.get_successors(lowerCamelCase__ )
for node in successors:
self.node_queue.append(lowerCamelCase__ )
if not self.reached:
return [self.start.pos]
return None
def _a ( self : Any , _lowerCamelCase : Tuple ):
"""simple docstring"""
A_ : Optional[Any] = []
for action in delta:
A_ : List[Any] = parent.pos_x + action[1]
A_ : Optional[int] = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(lowerCamelCase__ , lowerCamelCase__ , self.target.pos_y , self.target.pos_x , lowerCamelCase__ ) )
return successors
def _a ( self : List[str] , _lowerCamelCase : Dict ):
"""simple docstring"""
A_ : Optional[Any] = node
A_ : Optional[Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
A_ : List[Any] = current_node.parent
path.reverse()
return path
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ : int = BreadthFirstSearch(lowerCamelCase__ , lowerCamelCase__ )
A_ : Optional[int] = BreadthFirstSearch(lowerCamelCase__ , lowerCamelCase__ )
A_ : Optional[int] = False
def _a ( self : Optional[Any] ):
"""simple docstring"""
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
A_ : Union[str, Any] = self.fwd_bfs.node_queue.pop(0 )
A_ : Tuple = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
A_ : Union[str, Any] = True
return self.retrace_bidirectional_path(
lowerCamelCase__ , lowerCamelCase__ )
A_ : List[Any] = current_bwd_node
A_ : Any = current_fwd_node
A_ : int = {
self.fwd_bfs: self.fwd_bfs.get_successors(lowerCamelCase__ ),
self.bwd_bfs: self.bwd_bfs.get_successors(lowerCamelCase__ ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(lowerCamelCase__ )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def _a ( self : Any , _lowerCamelCase : Tuple , _lowerCamelCase : Any ):
"""simple docstring"""
A_ : Dict = self.fwd_bfs.retrace_path(lowerCamelCase__ )
A_ : List[Any] = self.bwd_bfs.retrace_path(lowerCamelCase__ )
bwd_path.pop()
bwd_path.reverse()
A_ : int = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
snake_case__ = (0, 0)
snake_case__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
snake_case__ = time.time()
snake_case__ = BreadthFirstSearch(init, goal)
snake_case__ = bfs.search()
snake_case__ = time.time() - start_bfs_time
print("""Unidirectional BFS computation time : """, bfs_time)
snake_case__ = time.time()
snake_case__ = BidirectionalBreadthFirstSearch(init, goal)
snake_case__ = bd_bfs.search()
snake_case__ = time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 356 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
snake_case__ = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
snake_case__ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def snake_case__ ( lowerCamelCase__ : list[list[int]] ) -> list[list[int]]:
A_ : str = []
for i in range(len(lowerCamelCase__ ) ):
A_ : Optional[Any] = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
A_ : Optional[int] = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(lowerCamelCase__ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(lowerCamelCase__ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(lowerCamelCase__ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
A_ : List[str] = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (
                not alive and neighbour_count == 3
            ):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(lowerCamelCase__ )
return next_generation
def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int ) -> list[Image.Image]:
A_ : List[Any] = []
for _ in range(lowerCamelCase__ ):
# Create output image
A_ : Optional[int] = Image.new('''RGB''' , (len(cells[0] ), len(lowerCamelCase__ )) )
A_ : int = img.load()
# Save cells to image
for x in range(len(lowerCamelCase__ ) ):
for y in range(len(cells[0] ) ):
A_ : Optional[Any] = 2_5_5 - cells[y][x] * 2_5_5
A_ : str = (colour, colour, colour)
# Save image
images.append(lowerCamelCase__ )
A_ : Optional[int] = new_generation(lowerCamelCase__ )
return images
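

# A deterministic sanity check for the update rule above, written against the
# un-obfuscated upstream name new_generation and taken as a parameter: the 3x3
# blinker oscillates with period 2, so two updates must restore the grid.
def _assert_blinker_oscillates(step_fn) -> None:
    blinker = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
    once = step_fn(blinker)
    assert once == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
    assert step_fn(once) == blinker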
if __name__ == "__main__":
snake_case__ = generate_images(GLIDER, 16)
images[0].save("""out.gif""", save_all=True, append_images=images[1:])
| 4 | 0 |
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
snake_case__ = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''
snake_case__ = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''
snake_case__ = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
    predictions: list of generated text to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of references, one for each prediction. Each
        reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCamelCase_ (datasets.Metric ):
"""simple docstring"""
    def _info(self):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, homepage='''https://github.com/krishnap25/mauve''', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''', id='''sequence'''),
                    '''references''': datasets.Value('''string''', id='''sequence'''),
                }), codebase_urls=['''https://github.com/krishnap25/mauve'''], reference_urls=[
                '''https://arxiv.org/abs/2102.01454''',
                '''https://github.com/krishnap25/mauve''',
            ], )
    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25, ):
        """simple docstring"""
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed, )
        return out
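# --- Usage sketch (illustrative addition, not part of the original metric script). ---
# A minimal, hedged example of calling this metric with non-default featurization
# options; the smaller featurizer model and the GPU id are assumptions for the sketch.
#
#   import datasets
#   mauve = datasets.load_metric('mauve')
#   out = mauve.compute(
#       predictions=["the cat sat on the mat"],
#       references=["a cat was sitting on the mat"],
#       featurize_model_name="gpt2",  # smaller than the 'gpt2-large' default
#       device_id=0,                  # assumed GPU id; falls back to CPU if not found
#       num_buckets="auto",
#   )
#   print(out.mauve, out.frontier_integral)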
| 357 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
"""simple docstring"""
    def setUp(self):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
        image_processor_map = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.48145466, 0.4578275, 0.40821073],
            '''image_std''': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, '''w''', encoding='''utf-8''') as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        """simple docstring"""
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        """simple docstring"""
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        """simple docstring"""
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        """simple docstring"""
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)
    def test_image_processor(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors='''np''')
        input_processor = processor(images=image_input, return_tensors='''np''')
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2)
    def test_tokenizer(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = '''lower newer'''
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding='''max_length''', max_length=64)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
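# --- Usage sketch (illustrative addition, not from the original test file). ---
# How the processor under test is typically used outside of unit tests; the
# checkpoint name is an assumption for the sketch.
#
#   from transformers import AlignProcessor
#   from PIL import Image
#
#   processor = AlignProcessor.from_pretrained("kakaobrain/align-base")  # assumed checkpoint
#   inputs = processor(text="a photo of a cat", images=Image.open("cat.png"), return_tensors="pt")
#   # inputs now holds input_ids / token_type_ids / attention_mask / pixel_values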
| 4 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/unispeech-large-1500h-cv""": (
        """https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    """simple docstring"""
    model_type = 'unispeech'
    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1E-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, replace_prob=0.5, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                f' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'
                f' `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob
    @property
    def inputs_to_logits_ratio(self):
        """simple docstring"""
        return functools.reduce(operator.mul, self.conv_stride, 1)
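# --- Usage sketch (illustrative addition). ---
# The property above is the cumulative stride of the convolutional feature extractor,
# i.e. how many raw waveform samples map to one encoder frame. With the defaults:
#
#   config = UniSpeechConfig()
#   # 5 * 2 * 2 * 2 * 2 * 2 * 2 = 320 samples per output frame
#   assert config.inputs_to_logits_ratio == 320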
| 358 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {
    """vocab_file""": """vocab.json""",
    """spm_file""": """sentencepiece.bpe.model""",
}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """facebook/s2t-small-librispeech-asr""": (
            """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"""
        ),
    },
    """spm_file""": {
        """facebook/s2t-small-librispeech-asr""": (
            """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"""
        )
    },
}
MAX_MODEL_INPUT_SIZES = {
    """facebook/s2t-small-librispeech-asr""": 1024,
}
MUSTC_LANGS = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""]
LANGUAGES = {"""mustc""": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file, spm_file, bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", do_upper_case=False, do_lower_case=False, tgt_lang=None, lang_codes=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ):
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang, lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f'<lang:{lang}>' for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f'<lang:{lang}>') for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        """simple docstring"""
        return len(self.encoder)
    @property
    def tgt_lang(self) -> str:
        """simple docstring"""
        return self._tgt_lang
    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang):
        """simple docstring"""
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)
    def set_tgt_lang_special_tokens(self, tgt_lang: str):
        """simple docstring"""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize(self, text: str):
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.encoder.get(token, self.encoder[self.unk_token])
    def _convert_id_to_token(self, index: int):
        """simple docstring"""
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]):
        """simple docstring"""
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def get_vocab(self) -> Dict:
        """simple docstring"""
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f'{save_directory} should be a directory'
        vocab_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str) -> Union[Dict, List]:
    with open(path, '''r''') as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    with open(path, '''w''') as f:
        json.dump(data, f, indent=2)
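# --- Usage sketch (illustrative addition). ---
# The helpers above are plain JSON/SentencePiece loaders; a quick, hedged round-trip
# (the file name is an assumption for the sketch):
#
#   vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
#   save_json(vocab, "vocab.json")
#   assert load_json("vocab.json") == vocab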
| 4 | 0 |
'''simple docstring'''
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception('''encode() accepts only letters of the alphabet and spaces''')
    return encoded
def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception('''decode() accepts only \'A\', \'B\' and spaces''')
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
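# --- Usage sketch (illustrative addition). ---
# Each letter maps to a unique five-character A/B code, and spaces pass through, so
# encode/decode form a lossless round trip:
#
#   secret = encode("hello world")   # 5 characters of 'A'/'B' per letter
#   assert decode(secret) == "hello world"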
if __name__ == "__main__":
from doctest import testmod
testmod()
| 359 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    """simple docstring"""
    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class WithDefaultExample:
    """simple docstring"""
    foo: int = 42
    baz: str = field(default='toto', metadata={'help': 'help message'})
@dataclass
class WithDefaultBoolExample:
    """simple docstring"""
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum):
    """simple docstring"""
    titi = 'titi'
    toto = 'toto'
class MixedTypeEnum(Enum):
    """simple docstring"""
    titi = 'titi'
    toto = 'toto'
    fourtytwo = 42
@dataclass
class EnumExample:
    """simple docstring"""
    foo: BasicEnum = "toto"
    def __post_init__(self):
        """simple docstring"""
        self.foo = BasicEnum(self.foo)
@dataclass
class MixedTypeEnumExample:
    """simple docstring"""
    foo: MixedTypeEnum = "toto"
    def __post_init__(self):
        """simple docstring"""
        self.foo = MixedTypeEnum(self.foo)
@dataclass
class OptionalExample:
    """simple docstring"""
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={'help': 'help message'})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass
class ListExample:
    """simple docstring"""
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class RequiredExample:
    """simple docstring"""
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()
    def __post_init__(self):
        """simple docstring"""
        self.required_enum = BasicEnum(self.required_enum)
@dataclass
class StringLiteralAnnotationExample:
    """simple docstring"""
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default='toto', metadata={'help': 'help message'})
    foo_str: "List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'])
if is_python_no_less_than_3_10:
    @dataclass
    class WithDefaultBoolExamplePep604:
        """simple docstring"""
        foo: bool = False
        baz: bool = True
        opt: bool | None = None
    @dataclass
    class OptionalExamplePep604:
        """simple docstring"""
        foo: int | None = None
        bar: float | None = field(default=None, metadata={'help': 'help message'})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    """simple docstring"""
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """simple docstring"""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != '''container'''}
            yy = {k: v for k, v in vars(y).items() if k != '''container'''}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('''choices''', None) and yy.get('''choices''', None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['''type'''](expected_choice), yy['''type'''](expected_choice))
                del xx["type"], yy["type"]
            self.assertEqual(xx, yy)
    def test_basic(self):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample)
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''', type=int, required=True)
        expected.add_argument('''--bar''', type=float, required=True)
        expected.add_argument('''--baz''', type=str, required=True)
        expected.add_argument('''--flag''', type=string_to_bool, default=False, const=True, nargs='''?''')
        self.argparsersEqual(parser, expected)
        args = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
    def test_with_default(self):
        """simple docstring"""
        parser = HfArgumentParser(WithDefaultExample)
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''', default=42, type=int)
        expected.add_argument('''--baz''', default='''toto''', type=str, help='''help message''')
        self.argparsersEqual(parser, expected)
    def test_with_default_bool(self):
        """simple docstring"""
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''', type=string_to_bool, default=False, const=True, nargs='''?''')
        expected.add_argument('''--baz''', type=string_to_bool, default=True, const=True, nargs='''?''')
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument('''--no_baz''', action='''store_false''', default=False, dest='''baz''')
        expected.add_argument('''--opt''', type=string_to_bool, default=None)
        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)
        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)
            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))
            args = parser.parse_args(['''--foo''', '''--no_baz'''])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))
            args = parser.parse_args(['''--foo''', '''--baz'''])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))
            args = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))
            args = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        """simple docstring"""
        parser = HfArgumentParser(MixedTypeEnumExample)
        expected = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''', default='''toto''', choices=['''titi''', '''toto''', 42], type=make_choice_type_function(['''titi''', '''toto''', 42]), )
        self.argparsersEqual(parser, expected)
        args = parser.parse_args([])
        self.assertEqual(args.foo, '''toto''')
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)
        args = parser.parse_args(['''--foo''', '''titi'''])
        self.assertEqual(args.foo, '''titi''')
        enum_ex = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)
        args = parser.parse_args(['''--foo''', '''42'''])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(['''--foo''', '''42'''])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)
    def test_with_literal(self):
        """simple docstring"""
        @dataclass
        class LiteralExample:
            """simple docstring"""
            foo: Literal["titi", "toto", 42] = "toto"
        parser = HfArgumentParser(LiteralExample)
        expected = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''', default='''toto''', choices=('''titi''', '''toto''', 42), type=make_choice_type_function(['''titi''', '''toto''', 42]), )
        self.argparsersEqual(parser, expected)
        args = parser.parse_args([])
        self.assertEqual(args.foo, '''toto''')
        args = parser.parse_args(['''--foo''', '''titi'''])
        self.assertEqual(args.foo, '''titi''')
        args = parser.parse_args(['''--foo''', '''42'''])
        self.assertEqual(args.foo, 42)
    def test_with_list(self):
        """simple docstring"""
        parser = HfArgumentParser(ListExample)
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo_int''', nargs='''+''', default=[], type=int)
        expected.add_argument('''--bar_int''', nargs='''+''', default=[1, 2, 3], type=int)
        expected.add_argument('''--foo_str''', nargs='''+''', default=['''Hallo''', '''Bonjour''', '''Hello'''], type=str)
        expected.add_argument('''--foo_float''', nargs='''+''', default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)
        args = parser.parse_args([])
        self.assertEqual(
            args, Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=['''Hallo''', '''Bonjour''', '''Hello'''], foo_float=[0.1, 0.2, 0.3]), )
        args = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=['''a''', '''b''', '''c'''], foo_float=[0.1, 0.7]))
    def test_with_optional(self):
        """simple docstring"""
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''', default=None, type=int)
        expected.add_argument('''--bar''', default=None, type=float, help='''help message''')
        expected.add_argument('''--baz''', default=None, type=str)
        expected.add_argument('''--ces''', nargs='''+''', default=[], type=str)
        expected.add_argument('''--des''', nargs='''+''', default=[], type=int)
        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)
        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)
            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))
            args = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz='''42''', ces=['''a''', '''b''', '''c'''], des=[1, 2, 3]))
    def test_with_required(self):
        """simple docstring"""
        parser = HfArgumentParser(RequiredExample)
        expected = argparse.ArgumentParser()
        expected.add_argument('''--required_list''', nargs='''+''', type=int, required=True)
        expected.add_argument('''--required_str''', type=str, required=True)
        expected.add_argument(
            '''--required_enum''', type=make_choice_type_function(['''titi''', '''toto''']), choices=['''titi''', '''toto'''], required=True, )
        self.argparsersEqual(parser, expected)
    def test_with_string_literal_annotation(self):
        """simple docstring"""
        parser = HfArgumentParser(StringLiteralAnnotationExample)
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''', type=int, required=True)
        expected.add_argument(
            '''--required_enum''', type=make_choice_type_function(['''titi''', '''toto''']), choices=['''titi''', '''toto'''], required=True, )
        expected.add_argument('''--opt''', type=string_to_bool, default=None)
        expected.add_argument('''--baz''', default='''toto''', type=str, help='''help message''')
        expected.add_argument('''--foo_str''', nargs='''+''', default=['''Hallo''', '''Bonjour''', '''Hello'''], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample)
        args_dict = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)
    def test_parse_dict_extra_key(self):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample)
        args_dict = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
            '''extra''': 42,
        }
        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)
    def test_parse_json(self):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample)
        args_dict_for_json = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, '''temp_json''')
            os.mkdir(temp_local_path)
            with open(temp_local_path + '''.json''', '''w+''') as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + '''.json'''))[0]
            args = BasicExample(**args_dict_for_json)
            self.assertEqual(parsed_args, args)
    def test_parse_yaml(self):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample)
        args_dict_for_yaml = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, '''temp_yaml''')
            os.mkdir(temp_local_path)
            with open(temp_local_path + '''.yaml''', '''w+''') as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + '''.yaml'''))[0]
            args = BasicExample(**args_dict_for_yaml)
            self.assertEqual(parsed_args, args)
    def test_integration_training_args(self):
        """simple docstring"""
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
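# --- Usage sketch (illustrative addition). ---
# The pattern these tests exercise, as it is typically used in a training script:
#
#   parser = HfArgumentParser(BasicExample)
#   (example,) = parser.parse_args_into_dataclasses(
#       ["--foo", "1", "--bar", "0.5", "--baz", "quux", "--flag", "true"]
#   )
#   # example is a BasicExample instance with typed fields (foo=1, bar=0.5, ...)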
| 4 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """facebook/mbart-large-50-one-to-many-mmt""": (
            """https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """facebook/mbart-large-50-one-to-many-mmt""": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN""", """af_ZA""", """az_AZ""", """bn_IN""", """fa_IR""", """he_IL""", """hr_HR""", """id_ID""", """ka_GE""", """km_KH""", """mk_MK""", """ml_IN""", """mn_MN""", """mr_IN""", """pl_PL""", """ps_AF""", """pt_XX""", """sv_SE""", """sw_KE""", """ta_IN""", """te_IN""", """th_TH""", """tl_XX""", """uk_UA""", """ur_PK""", """xh_ZA""", """gl_ES""", """sl_SI"""]
class MBart50Tokenizer(PreTrainedTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file, src_lang=None, tgt_lang=None, eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ):
        """simple docstring"""
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        kwargs["additional_special_tokens"] = kwargs.get('''additional_special_tokens''', [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._src_lang = src_lang if src_lang is not None else '''en_XX'''
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def vocab_size(self) -> int:
        """simple docstring"""
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token
    @property
    def src_lang(self) -> str:
        """simple docstring"""
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        """simple docstring"""
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_vocab(self) -> Dict:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token: str) -> int:
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index: int) -> str:
        """simple docstring"""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """simple docstring"""
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs, ) -> BatchEncoding:
        """simple docstring"""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        """simple docstring"""
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode(self):
        """simple docstring"""
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """simple docstring"""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """simple docstring"""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
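# --- Usage sketch (illustrative addition). ---
# How the language-code machinery above is used for translation preprocessing; the
# checkpoint name matches the pretrained map at the top of this file.
#
#   tokenizer = MBart50Tokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   model_inputs = tokenizer("UN Chief Says There Is No Military Solution", return_tensors="pt")
#   # input_ids start with the en_XX language code id and end with </s>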
| 360 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("""fixtures""")
class ImageProcessorUtilTester(unittest.TestCase):
    """simple docstring"""
    def test_cached_files_are_used_when_internet_is_down(self):
        """simple docstring"""
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''')
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''', return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''')
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_non_local(self):
        """simple docstring"""
        _ = ViTImageProcessor.from_pretrained(
            '''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''')
    def test_image_processor_from_pretrained_subfolder(self):
        """simple docstring"""
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''')
        config = AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/stable-diffusion-all-variants''', subfolder='''feature_extractor''')
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    """simple docstring"""
    @classmethod
    def setUpClass(cls):
        """simple docstring"""
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
        """simple docstring"""
        try:
            delete_repo(token=cls._token, repo_id='''test-image-processor''')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='''valid_org/test-image-processor-org''')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='''test-dynamic-image-processor''')
        except HTTPError:
            pass
    def test_push_to_hub(self):
        """simple docstring"""
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub('''test-image-processor''', use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor')
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='''test-image-processor''')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id='''test-image-processor''', push_to_hub=True, use_auth_token=self._token)
            new_image_processor = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor')
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v, getattr(new_image_processor, k))
    def test_push_to_hub_in_organization(self):
        """simple docstring"""
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub('''valid_org/test-image-processor''', use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''')
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='''valid_org/test-image-processor''')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id='''valid_org/test-image-processor-org''', push_to_hub=True, use_auth_token=self._token)
            new_image_processor = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''')
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v, getattr(new_image_processor, k))
    def test_push_to_hub_dynamic_image_processor(self):
        """simple docstring"""
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub('''test-dynamic-image-processor''', use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map, {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''}, )
        new_image_processor = AutoImageProcessor.from_pretrained(
            f'{USER}/test-dynamic-image-processor', trust_remote_code=True)
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, '''CustomImageProcessor''')
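# --- Usage sketch (illustrative addition). ---
# The push/pull round trip exercised above, outside of the test harness. The repo id
# is an assumption for the sketch and requires a valid Hub token.
#
#   from transformers import ViTImageProcessor
#   processor = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
#   processor.push_to_hub("my-user/my-image-processor")  # assumed repo id
#   same_processor = ViTImageProcessor.from_pretrained("my-user/my-image-processor")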
| 4 | 0 |
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
snake_case__ = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    if "." in tensor_name:
        splits = tensor_name.split('''.''')
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f'{module} has no attribute {split}.')
            module = new_module
        tensor_name = splits[-1]
    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.')
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)
    if old_value.device == torch.device('''meta''') and device not in ["meta", torch.device('''meta''')] and value is None:
        raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.')
    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, '''Params4bit''') and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)
    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to('''cpu''')
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version('''bitsandbytes''')) > version.parse(
                        '''0.37.2''')
                    if not is_8bit_serializable:
                        raise ValueError(
                            '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
                            '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''')
            else:
                new_value = torch.tensor(value, device='''cpu''')
            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T
            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)
            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, '''SCB''', fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)
        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in '''.'''.join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features, out_features, module.bias is not None, has_fp16_weights=quantization_config.llm_int8_has_fp16_weight, threshold=quantization_config.llm_int8_threshold, )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features, out_features, module.bias is not None, quantization_config.bnb_4bit_compute_dtype, compress_statistics=quantization_config.bnb_4bit_use_double_quant, quant_type=quantization_config.bnb_4bit_quant_type, )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module, modules_to_not_convert, current_key_name, quantization_config, has_been_replaced=has_been_replaced, )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config)
    if not has_been_replaced:
        logger.warning(
            '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
            ''' Please double check your model architecture, or submit an issue on github if you think this is'''
            ''' a bug.''')
    return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        '''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''', FutureWarning, )
    return replace_with_bnb_linear(*args, **kwargs)
def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''', FutureWarning, )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def snake_case__ ( lowerCamelCase__ : List[str] ) -> List[str]:
    A_ : str = deepcopy(__lowerCAmelCase )  # this has 0 cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
A_ : Optional[int] = find_tied_parameters(__lowerCAmelCase )
# For compatibility with Accelerate < 0.18
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
A_ : str = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
A_ : Optional[Any] = sum(__lowerCAmelCase , [] )
A_ : List[Any] = len(__lowerCAmelCase ) > 0
# Check if it is a base model
A_ : Optional[int] = not hasattr(__lowerCAmelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A_ : str = list(model.named_children() )
A_ : Optional[int] = [list_modules[-1][0]]
# add last module together with tied weights
A_ : int = set(__lowerCAmelCase ) - set(__lowerCAmelCase )
A_ : List[Any] = list(set(__lowerCAmelCase ) ) + list(__lowerCAmelCase )
# remove ".weight" from the keys
A_ : List[Any] = ['''.weight''', '''.bias''']
A_ : int = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A_ : int = name.replace(__lowerCAmelCase , '''''' )
filtered_module_names.append(__lowerCAmelCase )
return filtered_module_names
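# --- Illustrative sketch, not part of the original file ---
# Minimal standalone demo of the `current_key_name` bookkeeping used by the
# recursive replacement above: walk `named_children()` depth-first, keep a stack
# of names, and record the dotted path of every nn.Linear. The toy model and
# helper name are made up for this example.
import torch.nn as nn

def _collect_linear_paths(model, current_key_name=None, found=None):
    current_key_name = [] if current_key_name is None else current_key_name
    found = [] if found is None else found
    for name, module in model.named_children():
        current_key_name.append(name)
        if isinstance(module, nn.Linear):
            found.append('''.'''.join(current_key_name))
        if len(list(module.children())) > 0:
            _collect_linear_paths(module, current_key_name, found)
        current_key_name.pop(-1)  # remove the last key for recursion, as above
    return found

if __name__ == "__main__":
    toy = nn.Sequential(nn.Linear(4, 4), nn.Sequential(nn.ReLU(), nn.Linear(4, 2)))
    assert _collect_linear_paths(toy) == ['''0''', '''1.1''']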
| 361 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = (
'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements that should be identified in the segmentation mask. The tool returns the mask.'
)
_lowerCAmelCase = 'CIDAS/clipseg-rd64-refined'
_lowerCAmelCase = 'image_segmenter'
_lowerCAmelCase = CLIPSegForImageSegmentation
_lowerCAmelCase = ['image', 'text']
_lowerCAmelCase = ['image']
def __init__( self : Optional[int] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ['''vision'''] )
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
def _a ( self : List[str] , _lowerCamelCase : "Image" , _lowerCamelCase : str ):
"""simple docstring"""
return self.pre_processor(text=[label] , images=[image] , padding=_lowerCamelCase , return_tensors='''pt''' )
def _a ( self : Union[str, Any] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
with torch.no_grad():
A_ : Optional[int] = self.model(**_lowerCamelCase ).logits
return logits
def _a ( self : List[str] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
A_ : int = outputs.cpu().detach().numpy()
A_ : Tuple = 0
A_ : List[str] = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
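# --- Illustrative sketch, not part of the original file ---
# Standalone replica of the decode arithmetic above: binarize the logits (the
# exact threshold direction is an assumption) and scale to an 8-bit mask image.
import numpy as np

logits = np.array([[-2.0, 0.5], [1.5, -0.1]])
mask = logits.copy()
mask[mask <= 0] = 0
mask[mask > 0] = 1
print((mask * 255).astype(np.uint8))  # -> a 0/255 binary mask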
| 4 | 0 |
'''simple docstring'''
def snake_case__ ( a : int , b : int ) -> str:
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
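    # --- Illustrative sketch, not part of the original file ---
    # Quick sanity checks for the XOR helper above (25 = 0b11001, 32 = 0b100000):
    assert snake_case__(25, 32) == "0b111001"
    assert snake_case__(0, 0) == "0b0"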
| 362 |
'''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum( arr : Sequence[float] , allow_empty_subarrays : bool = False ) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('''-inf''' )
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num , curr_sum + num )
        max_sum = max(max_sum , curr_sum )
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'{max_subarray_sum(nums) = }')
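    # --- Illustrative sketch, not part of the original file ---
    # With `allow_empty_subarrays=True` the empty subarray (sum 0) is allowed,
    # so an all-negative input yields 0 instead of its largest element:
    assert max_subarray_sum([-2, -3], allow_empty_subarrays=True) == 0
    assert max_subarray_sum([-2, -3]) == -2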
| 4 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase_ (lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = StableDiffusionInpaintPipeline
_lowerCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
_lowerCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_lowerCAmelCase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowerCAmelCase = frozenset([] )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a__ , )
A_ : Optional[Any] = PNDMScheduler(skip_prk_steps=a__ )
torch.manual_seed(0 )
A_ : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
A_ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
A_ : Union[str, Any] = CLIPTextModel(a__ )
A_ : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
A_ : Any = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _a ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any]=0 ):
"""simple docstring"""
A_ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
A_ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A_ : int = Image.fromarray(np.uinta(a__ ) ).convert('''RGB''' ).resize((64, 64) )
A_ : Dict = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((64, 64) )
if str(a__ ).startswith('''mps''' ):
A_ : Union[str, Any] = torch.manual_seed(a__ )
else:
A_ : Optional[int] = torch.Generator(device=a__ ).manual_seed(a__ )
A_ : Tuple = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A_ : Any = self.get_dummy_components()
A_ : Tuple = StableDiffusionInpaintPipeline(**a__ )
A_ : List[str] = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
A_ : List[Any] = self.get_dummy_inputs(a__ )
A_ : List[Any] = sd_pipe(**a__ ).images
A_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : int = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self : Tuple ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : str ):
"""simple docstring"""
A_ : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
A_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
A_ : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''' )
A_ : str = '''stabilityai/stable-diffusion-2-inpainting'''
A_ : List[Any] = StableDiffusionInpaintPipeline.from_pretrained(a__ , safety_checker=a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
A_ : Union[str, Any] = '''Face of a yellow cat, high resolution, sitting on a park bench'''
A_ : List[Any] = torch.manual_seed(0 )
A_ : Union[str, Any] = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type='''np''' , )
A_ : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
A_ : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
A_ : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
A_ : Union[str, Any] = '''stabilityai/stable-diffusion-2-inpainting'''
A_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
a__ , torch_dtype=torch.floataa , safety_checker=a__ , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
A_ : List[str] = '''Face of a yellow cat, high resolution, sitting on a park bench'''
A_ : List[str] = torch.manual_seed(0 )
A_ : Union[str, Any] = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type='''np''' , )
A_ : Any = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def _a ( self : str ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A_ : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
A_ : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
A_ : Dict = '''stabilityai/stable-diffusion-2-inpainting'''
A_ : Any = PNDMScheduler.from_pretrained(a__ , subfolder='''scheduler''' )
A_ : List[Any] = StableDiffusionInpaintPipeline.from_pretrained(
a__ , safety_checker=a__ , scheduler=a__ , torch_dtype=torch.floataa , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
A_ : List[Any] = '''Face of a yellow cat, high resolution, sitting on a park bench'''
A_ : Optional[Any] = torch.manual_seed(0 )
A_ : Dict = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , num_inference_steps=2 , output_type='''np''' , )
A_ : int = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
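# --- Illustrative sketch, not part of the original file ---
# The seeding pattern used by `get_dummy_inputs` above: MPS only supports the
# global `torch.manual_seed`, while other devices take a device-bound Generator.
import torch

def make_generator(device, seed=0):
    if str(device).startswith('''mps'''):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

# e.g. `make_generator('cpu')` yields reproducible latents across runs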
| 363 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'speech_to_text_2'
_lowerCAmelCase = ['past_key_values']
_lowerCAmelCase = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Optional[Any] , _lowerCamelCase : Optional[Any]=10000 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : int=2048 , _lowerCamelCase : Dict=4 , _lowerCamelCase : str=0.0 , _lowerCamelCase : int=True , _lowerCamelCase : int="relu" , _lowerCamelCase : Any=256 , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : int=2 , _lowerCamelCase : List[str]=True , _lowerCamelCase : str=1 , _lowerCamelCase : List[Any]=0 , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : Tuple=1024 , **_lowerCamelCase : int , ):
"""simple docstring"""
A_ : Optional[int] = vocab_size
A_ : Tuple = d_model
A_ : List[str] = decoder_ffn_dim
A_ : str = decoder_layers
A_ : Any = decoder_attention_heads
A_ : int = dropout
A_ : str = attention_dropout
A_ : Optional[int] = activation_dropout
A_ : str = activation_function
A_ : List[Any] = init_std
A_ : Union[str, Any] = decoder_layerdrop
A_ : Any = use_cache
A_ : Optional[Any] = decoder_layers
A_ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True
A_ : Optional[Any] = max_target_positions
super().__init__(
pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , **_lowerCamelCase , )
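# --- Illustrative sketch, not part of the original file ---
# How an `attribute_map` like the one above behaves in PretrainedConfig: generic
# names are redirected to the model-specific fields. Standalone toy replica
# (the class name and numbers are made up for the demo):
class _ToyConfig:
    attribute_map = {'''num_attention_heads''': '''decoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__(self, d_model=256, decoder_attention_heads=4):
        self.d_model = d_model
        self.decoder_attention_heads = decoder_attention_heads

    def __getattr__(self, name):
        # consulted only for names not found by normal lookup
        if name in type(self).attribute_map:
            return getattr(self, type(self).attribute_map[name])
        raise AttributeError(name)

assert _ToyConfig().hidden_size == 256
assert _ToyConfig().num_attention_heads == 4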
| 4 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class UpperCamelCase_ (__lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase = '''microsoft/speecht5_tts'''
_lowerCAmelCase = (
'''This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '''
'''text to read (in English) and returns a waveform object containing the sound.'''
)
_lowerCAmelCase = '''text_reader'''
_lowerCAmelCase = SpeechTaProcessor
_lowerCAmelCase = SpeechTaForTextToSpeech
_lowerCAmelCase = SpeechTaHifiGan
_lowerCAmelCase = ['''text''']
_lowerCAmelCase = ['''audio''']
def _a ( self : List[Any] ):
"""simple docstring"""
if self.post_processor is None:
A_ : str = '''microsoft/speecht5_hifigan'''
super().setup()
def _a ( self : Optional[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Dict=None ):
"""simple docstring"""
A_ : Dict = self.pre_processor(text=lowerCAmelCase_ , return_tensors='''pt''' , truncation=lowerCAmelCase_ )
if speaker_embeddings is None:
if not is_datasets_available():
                raise ImportError('''The `datasets` library needs to be installed when speaker embeddings are not passed.''' )
A_ : Optional[Any] = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' )
A_ : Union[str, Any] = torch.tensor(embeddings_dataset[7305]['''xvector'''] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def _a ( self : List[str] , _lowerCamelCase : Any ):
"""simple docstring"""
with torch.no_grad():
return self.model.generate_speech(**lowerCAmelCase_ )
def _a ( self : int , _lowerCamelCase : List[Any] ):
"""simple docstring"""
with torch.no_grad():
return self.post_processor(lowerCAmelCase_ ).cpu().detach()
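# --- Illustrative sketch, not part of the original file ---
# Rough driving code for the tool above, left as comments because calling it
# downloads the SpeechT5 checkpoints; the exact call convention is assumed from
# the `PipelineTool` base class rather than stated in this file:
# tool = UpperCamelCase_()
# speech = tool(text='''Hello world''')  # waveform tensor from the HiFi-GAN vocoder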
| 364 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
snake_case__ = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'table-transformer'
_lowerCAmelCase = ['past_key_values']
_lowerCAmelCase = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Any , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Dict=None , _lowerCamelCase : int=3 , _lowerCamelCase : Any=100 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : Any=8 , _lowerCamelCase : Dict=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : int=8 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : List[Any]=0.0 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Optional[int]="relu" , _lowerCamelCase : Union[str, Any]=256 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : str=0.02 , _lowerCamelCase : Tuple=1.0 , _lowerCamelCase : Dict=False , _lowerCamelCase : str="sine" , _lowerCamelCase : str="resnet50" , _lowerCamelCase : Any=True , _lowerCamelCase : List[str]=False , _lowerCamelCase : Any=1 , _lowerCamelCase : int=5 , _lowerCamelCase : Tuple=2 , _lowerCamelCase : Optional[int]=1 , _lowerCamelCase : Any=1 , _lowerCamelCase : Dict=5 , _lowerCamelCase : str=2 , _lowerCamelCase : Union[str, Any]=0.1 , **_lowerCamelCase : int , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
A_ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : str = backbone_config.get('''model_type''' )
A_ : Optional[int] = CONFIG_MAPPING[backbone_model_type]
A_ : List[str] = config_class.from_dict(_lowerCamelCase )
# set timm attributes to None
A_ ,A_ ,A_ : Union[str, Any] = None, None, None
A_ : Optional[Any] = use_timm_backbone
A_ : Optional[int] = backbone_config
A_ : Optional[Any] = num_channels
A_ : Dict = num_queries
A_ : str = d_model
A_ : List[str] = encoder_ffn_dim
A_ : int = encoder_layers
A_ : Optional[Any] = encoder_attention_heads
A_ : List[str] = decoder_ffn_dim
A_ : Any = decoder_layers
A_ : List[str] = decoder_attention_heads
A_ : Tuple = dropout
A_ : Optional[Any] = attention_dropout
A_ : Any = activation_dropout
A_ : List[Any] = activation_function
A_ : Dict = init_std
A_ : Any = init_xavier_std
A_ : List[Any] = encoder_layerdrop
A_ : int = decoder_layerdrop
A_ : Any = encoder_layers
A_ : List[str] = auxiliary_loss
A_ : List[Any] = position_embedding_type
A_ : Optional[Any] = backbone
A_ : Tuple = use_pretrained_backbone
A_ : List[Any] = dilation
# Hungarian matcher
A_ : List[str] = class_cost
A_ : str = bbox_cost
A_ : Union[str, Any] = giou_cost
# Loss coefficients
A_ : Any = mask_loss_coefficient
A_ : Optional[int] = dice_loss_coefficient
A_ : Dict = bbox_loss_coefficient
A_ : int = giou_loss_coefficient
A_ : int = eos_coefficient
super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase )
@property
def _a ( self : List[Any] ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def _a ( self : Any ):
"""simple docstring"""
return self.d_model
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = version.parse('1.11' )
@property
def _a ( self : Tuple ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def _a ( self : Optional[int] ):
"""simple docstring"""
return 1E-5
@property
def _a ( self : str ):
"""simple docstring"""
return 12
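# --- Illustrative note, not part of the original file ---
# The `inputs` property above names the dynamic axes per input tensor; it is the
# same dict shape that `torch.onnx.export(..., dynamic_axes=...)` consumes, e.g.
# {'''pixel_values''': {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}}.
# The two numeric properties are the validation tolerance (1e-5) and the default
# ONNX opset (12) used at export time.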
| 4 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class UpperCamelCase_ (_lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase = 'marian'
_lowerCAmelCase = ['past_key_values']
_lowerCAmelCase = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Tuple , _lowerCamelCase : Union[str, Any]=58101 , _lowerCamelCase : List[str]=None , _lowerCamelCase : Optional[Any]=1024 , _lowerCamelCase : int=12 , _lowerCamelCase : Optional[int]=4096 , _lowerCamelCase : List[Any]=16 , _lowerCamelCase : Optional[int]=12 , _lowerCamelCase : List[Any]=4096 , _lowerCamelCase : Optional[Any]=16 , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Any=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Tuple="gelu" , _lowerCamelCase : Dict=1024 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : Dict=0.0 , _lowerCamelCase : Dict=0.0 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : Optional[Any]=58100 , _lowerCamelCase : Optional[int]=False , _lowerCamelCase : Any=58100 , _lowerCamelCase : List[str]=0 , _lowerCamelCase : Union[str, Any]=0 , _lowerCamelCase : Optional[int]=True , **_lowerCamelCase : Tuple , ):
"""simple docstring"""
A_ : str = vocab_size
A_ : List[str] = decoder_vocab_size or vocab_size
A_ : str = max_position_embeddings
A_ : Dict = d_model
A_ : Dict = encoder_ffn_dim
A_ : Tuple = encoder_layers
A_ : Union[str, Any] = encoder_attention_heads
A_ : Any = decoder_ffn_dim
A_ : Dict = decoder_layers
A_ : List[Any] = decoder_attention_heads
A_ : Optional[Any] = dropout
A_ : str = attention_dropout
A_ : List[str] = activation_dropout
A_ : Any = activation_function
A_ : Optional[int] = init_std
A_ : Union[str, Any] = encoder_layerdrop
A_ : Any = decoder_layerdrop
A_ : Dict = use_cache
A_ : Optional[int] = encoder_layers
A_ : Optional[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
A_ : Union[str, Any] = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , forced_eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
class UpperCamelCase_ (_lowerCAmelCase ):
"""simple docstring"""
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def _a ( self : str ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
A_ : Union[str, Any] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
                A_ : Optional[Any] = {0: '''batch'''}
                A_ : int = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
                A_ : int = {0: '''batch''', 1: '''decoder_sequence'''}
                A_ : Tuple = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
A_ : str = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
A_ : Any = self.num_layers
for i in range(SCREAMING_SNAKE_CASE_ ):
                    A_ : Any = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    A_ : Tuple = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
A_ : str = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def _a ( self : int ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
A_ : Optional[Any] = super().outputs
else:
A_ : Tuple = super(SCREAMING_SNAKE_CASE_ , self ).outputs
if self.use_past:
A_ : Any = self.num_layers
for i in range(SCREAMING_SNAKE_CASE_ ):
                A_ : Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                A_ : Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def _a ( self : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] = -1 , _lowerCamelCase : List[Any] = -1 , _lowerCamelCase : int = False , _lowerCamelCase : List[str] = None , ):
"""simple docstring"""
A_ : str = self._generate_dummy_inputs_for_encoder_and_decoder(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Generate decoder inputs
A_ : Any = seq_length if not self.use_past else 1
A_ : Optional[int] = self._generate_dummy_inputs_for_encoder_and_decoder(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A_ : int = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
A_ : Any = dict(**SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
A_ : int = common_inputs["""input_ids"""].shape
A_ : Union[str, Any] = common_inputs["""decoder_input_ids"""].shape[1]
A_ : str = self.num_attention_heads
A_ : List[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A_ : int = decoder_seq_length + 3
A_ : Any = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A_ : Tuple = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] , dim=1 )
A_ : int = []
            # If both encoder and decoder layer counts are present in the model configuration, both are considered
A_ : Optional[int] = self.num_layers
A_ : Optional[Any] = min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A_ : Tuple = max(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) - min_num_layers
            A_ : Optional[int] = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(SCREAMING_SNAKE_CASE_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(SCREAMING_SNAKE_CASE_ ),
torch.zeros(SCREAMING_SNAKE_CASE_ ),
torch.zeros(SCREAMING_SNAKE_CASE_ ),
torch.zeros(SCREAMING_SNAKE_CASE_ ),
) )
# TODO: test this.
            A_ : Any = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
common_inputs["past_key_values"].append((torch.zeros(SCREAMING_SNAKE_CASE_ ), torch.zeros(SCREAMING_SNAKE_CASE_ )) )
return common_inputs
def _a ( self : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] = -1 , _lowerCamelCase : str = -1 , _lowerCamelCase : int = False , _lowerCamelCase : Optional[int] = None , ):
"""simple docstring"""
A_ : Tuple = self._generate_dummy_inputs_for_encoder_and_decoder(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
A_ : Tuple = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
A_ : List[str] = seqlen + 2
A_ : Optional[int] = self.num_layers
A_ : Any = self.num_attention_heads
A_ : int = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A_ : Optional[int] = common_inputs["""attention_mask"""].dtype
A_ : int = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )] , dim=1 )
A_ : Union[str, Any] = [
(torch.zeros(SCREAMING_SNAKE_CASE_ ), torch.zeros(SCREAMING_SNAKE_CASE_ )) for _ in range(SCREAMING_SNAKE_CASE_ )
]
return common_inputs
def _a ( self : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any = -1 , _lowerCamelCase : Any = -1 , _lowerCamelCase : Optional[Any] = False , _lowerCamelCase : Union[str, Any] = None , ):
"""simple docstring"""
A_ : Optional[int] = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A_ : List[Any] = tokenizer.num_special_tokens_to_add(SCREAMING_SNAKE_CASE_ )
A_ : List[Any] = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=SCREAMING_SNAKE_CASE_ )
# Generate dummy inputs according to compute batch and sequence
        A_ : str = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
A_ : Optional[int] = dict(tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ ) )
return common_inputs
def _a ( self : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : str = -1 , _lowerCamelCase : Tuple = -1 , _lowerCamelCase : str = False , _lowerCamelCase : int = None , ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
A_ : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , seq_length=SCREAMING_SNAKE_CASE_ , is_pair=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ )
else:
A_ : Union[str, Any] = self._generate_dummy_inputs_for_causal_lm(
SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , seq_length=SCREAMING_SNAKE_CASE_ , is_pair=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ )
return common_inputs
def _a ( self : Optional[int] , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
A_ : Optional[int] = super()._flatten_past_key_values_(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
A_ : str = super(SCREAMING_SNAKE_CASE_ , self )._flatten_past_key_values_(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return 1E-4
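# --- Illustrative sketch, not part of the original file ---
# Shape bookkeeping behind the dummy `past_key_values` built above: one
# (key, value) pair of zeros per layer, each shaped
# (batch, num_heads, past_seq_len, hidden_size // num_heads). The concrete
# numbers below are made up for the demo; assumes torch is installed.
import torch

batch, num_heads, past_len, head_dim, num_layers = 2, 8, 5, 64, 6
past_key_values = [
    (torch.zeros(batch, num_heads, past_len, head_dim), torch.zeros(batch, num_heads, past_len, head_dim))
    for _ in range(num_layers)
]
assert past_key_values[0][0].shape == (2, 8, 5, 64)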
| 365 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : List[str]=3 , _lowerCamelCase : Any=32 , _lowerCamelCase : Union[str, Any]=3 , _lowerCamelCase : int=10 , _lowerCamelCase : Union[str, Any]=[8, 16, 32, 64] , _lowerCamelCase : Dict=[1, 1, 2, 1] , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Any="relu" , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Dict=["stage2", "stage3", "stage4"] , _lowerCamelCase : Union[str, Any]=[2, 3, 4] , _lowerCamelCase : Tuple=1 , ):
"""simple docstring"""
A_ : List[str] = parent
A_ : List[str] = batch_size
A_ : Union[str, Any] = image_size
A_ : Tuple = num_channels
A_ : Any = embeddings_size
A_ : int = hidden_sizes
A_ : Optional[Any] = depths
A_ : List[Any] = is_training
A_ : Optional[int] = use_labels
A_ : int = hidden_act
A_ : Tuple = num_labels
A_ : Union[str, Any] = scope
A_ : List[Any] = len(_lowerCamelCase )
A_ : Union[str, Any] = out_features
A_ : List[Any] = out_indices
A_ : Dict = num_groups
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Any = ids_tensor([self.batch_size] , self.num_labels )
A_ : Any = self.get_config()
return config, pixel_values, labels
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def _a ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] ):
"""simple docstring"""
A_ : Any = BitModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : int = model(_lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _a ( self : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
A_ : Dict = self.num_labels
A_ : Optional[Any] = BitForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : Any , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : List[Any] ):
"""simple docstring"""
A_ : List[Any] = BitBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : int = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
A_ : Optional[Any] = None
A_ : int = BitBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Optional[int] = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : Union[str, Any] = self.prepare_config_and_inputs()
A_ ,A_ ,A_ : Union[str, Any] = config_and_inputs
A_ : str = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase_ (a__, a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
_lowerCAmelCase = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : List[str] = BitModelTester(self )
A_ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : List[Any] ):
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def _a ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def _a ( self : Any ):
"""simple docstring"""
pass
def _a ( self : List[Any] ):
"""simple docstring"""
A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Dict = model_class(_lowerCamelCase )
A_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : int = [*signature.parameters.keys()]
A_ : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ ,A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : str = model_class(config=_lowerCamelCase )
for name, module in model.named_modules():
if isinstance(_lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
def _a ( self : int ):
"""simple docstring"""
def check_hidden_states_output(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : int ):
A_ : Union[str, Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
A_ : Union[str, Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
A_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ : List[Any] = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Tuple = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A_ : Tuple = layer_type
A_ : Optional[Any] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : List[str] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def _a ( self : Tuple ):
"""simple docstring"""
pass
def _a ( self : str ):
"""simple docstring"""
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def _a ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[Any] = BitModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : List[Any] ):
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def _a ( self : Dict ):
"""simple docstring"""
A_ : Optional[int] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowerCamelCase )
A_ : Union[str, Any] = self.default_image_processor
A_ : Optional[int] = prepare_img()
A_ : int = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
A_ : Union[str, Any] = model(**_lowerCamelCase )
# verify the logits
A_ : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
A_ : Tuple = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) )
@require_torch
class UpperCamelCase_ (a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (BitBackbone,) if is_torch_available() else ()
_lowerCAmelCase = BitConfig
_lowerCAmelCase = False
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Union[str, Any] = BitModelTester(self )
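# --- Illustrative sketch, not part of the original file ---
# The initialization check above expects fresh norm layers to start as identity
# transforms: weights all ones, biases all zeros. Standalone demo with a plain
# BatchNorm2d (the channel count is arbitrary):
import torch
from torch import nn

bn = nn.BatchNorm2d(8)
assert torch.all(bn.weight == 1) and torch.all(bn.bias == 0)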
| 4 | 0 |
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
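# --- Illustrative sketch, not part of the original file ---
# Example invocation; the script filename and all paths are placeholders:
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5.ckpt --dump_path ./sd15-diffusers \
#       --scheduler_type pndm --extract_ema --half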
| 366 |
'''simple docstring'''
import pprint
import requests
snake_case__ = """https://zenquotes.io/api"""
def snake_case__ ( ) -> list:
return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def snake_case__ ( ) -> list:
return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
snake_case__ = random_quotes()
pprint.pprint(response)
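# --- Illustrative note, not part of the original file ---
# Both endpoints return a JSON list of quote objects; at the time of writing each
# item looks roughly like {"q": <quote>, "a": <author>, "h": <html>}. Verify
# against the live API before depending on these keys.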
| 4 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCamelCase_ (_a, unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _a ( self : int ):
"""simple docstring"""
A_ : Any = ort.SessionOptions()
A_ : List[str] = False
return options
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
A_ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
A_ : Optional[int] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
A_ : str = 'A red cat sitting on a park bench'
A_ : Union[str, Any] = np.random.RandomState(0 )
A_ : str = pipe(
prompt=_lowerCamelCase , image=_lowerCamelCase , mask_image=_lowerCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCamelCase , output_type='''np''' , )
A_ : Optional[int] = output.images
A_ : Optional[Any] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
A_ : Optional[int] = np.array([0.25_14, 0.30_07, 0.35_17, 0.17_90, 0.23_82, 0.31_67, 0.19_44, 0.22_73, 0.24_64] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _a ( self : Any ):
"""simple docstring"""
A_ : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
A_ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
A_ : Any = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
A_ : List[str] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_lowerCamelCase , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
A_ : Optional[int] = 'A red cat sitting on a park bench'
A_ : List[Any] = np.random.RandomState(0 )
A_ : Dict = pipe(
prompt=_lowerCamelCase , image=_lowerCamelCase , mask_image=_lowerCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowerCamelCase , output_type='''np''' , )
A_ : int = output.images
A_ : str = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
A_ : int = np.array([0.00_86, 0.00_77, 0.00_83, 0.00_93, 0.01_07, 0.01_39, 0.00_94, 0.00_97, 0.01_25] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
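# --- Illustrative sketch, not part of the original file ---
# The tests above compare a tiny corner patch of the output against a golden
# vector. Standalone replica of that slicing/comparison pattern (dummy data):
import numpy as np

images = np.zeros((1, 512, 512, 3))
image_slice = images[0, 255:258, 255:258, -1]  # 3x3 patch from the last channel
expected_slice = np.zeros(9)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3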
| 367 |
'''simple docstring'''
from __future__ import annotations
class UpperCamelCase_ :
    """N-order IIR filter; defaults to a pass-through filter until coefficients are set."""

    def __init__( self , order : int ):
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients( self , a_coeffs : list[float] , b_coeffs : list[float] ):
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs ) != self.order + 1:
            raise ValueError(
                f'Expected a_coeffs to have {self.order + 1} elements '
                f'for {self.order}-order filter, got {len(a_coeffs )}' )
        if len(b_coeffs ) != self.order + 1:
            raise ValueError(
                f'Expected b_coeffs to have {self.order + 1} elements '
                f'for {self.order}-order filter, got {len(b_coeffs )}' )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process( self , sample : float ):
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # shift the histories and record the new sample / output
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
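# --- Illustrative sketch, not part of the original file ---
# With the default (pass-through) coefficients the filter returns its input
# unchanged, which makes a quick smoke test possible:
if __name__ == "__main__":
    filt = UpperCamelCase_(2)
    assert [filt.process(x) for x in (0.0, 1.0, 0.5)] == [0.0, 1.0, 0.5]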
| 4 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__a )
class UpperCamelCase_ (__a ):
"""simple docstring"""
_lowerCAmelCase = field(default='automatic-speech-recognition', metadata={'include_in_asdict_even_if_is_default': True} )
_lowerCAmelCase = Features({'audio': Audio()} )
_lowerCAmelCase = Features({'transcription': Value('string' )} )
_lowerCAmelCase = 'audio'
_lowerCAmelCase = 'transcription'
def _a ( self : Dict , _lowerCamelCase : Optional[Any] ):
"""simple docstring"""
if self.audio_column not in features:
raise ValueError(f'Column {self.audio_column} is not present in features.' )
if not isinstance(features[self.audio_column] , UpperCamelCase__ ):
raise ValueError(f'Column {self.audio_column} is not an Audio type.' )
A_ : int = copy.deepcopy(self )
A_ : Dict = self.input_schema.copy()
A_ : Optional[int] = features[self.audio_column]
A_ : List[str] = input_schema
return task_template
@property
def _a ( self : str ):
"""simple docstring"""
return {self.audio_column: "audio", self.transcription_column: "transcription"}
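# --- Illustrative sketch, not part of the original file ---
# The template above is a frozen dataclass, so column names are customized with
# `dataclasses.replace` rather than attribute assignment. Standalone toy demo
# (the class name is made up):
from dataclasses import dataclass, replace

@dataclass(frozen=True)
class _ToyTask:
    audio_column: str = '''audio'''
    transcription_column: str = '''transcription'''

task = replace(_ToyTask(), audio_column='''my_audio''')
assert task.audio_column == '''my_audio'''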
| 368 |
'''simple docstring'''
class Node:
    """simple docstring"""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive in-order traversal collects values in sorted order
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build a BST from the input, then read it back in order
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
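    # Sanity check (illustrative addition): the result should match Python's
    # built-in sort.
    assert tree_sort([10, 1, 3, 2, 9, 14, 13]) == sorted([10, 1, 3, 2, 9, 14, 13])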
| 4 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase_ (PipelineTesterMixin, unittest.TestCase ):
    """simple docstring"""
    pipeline_class = KandinskyV22Pipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        """simple docstring"""
        return 32
    @property
    def time_input_dim( self ):
        """simple docstring"""
        return 32
    @property
    def block_out_channels_a( self ):
        """simple docstring"""
        return self.time_input_dim
    @property
    def time_embed_dim( self ):
        """simple docstring"""
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self ):
        """simple docstring"""
        return 100
    @property
    def dummy_unet( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model_kwargs = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        model = UNet2DConditionModel(**model_kwargs )
        return model
@property
    def dummy_movq_kwargs( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
"""simple docstring"""
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components( self ):
        """simple docstring"""
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule='''linear''' , beta_start=0.00085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type='''epsilon''' , thresholding=False , )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
    def test_kandinsky( self ):
        """simple docstring"""
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_text2img( self ):
"""simple docstring"""
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy''' )
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyV22Pipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        prompt = '''red cat, 4k photo'''
        generator = torch.Generator(device='''cuda''' ).manual_seed(0 )
        image_emb, zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
        generator = torch.Generator(device='''cuda''' ).manual_seed(0 )
        output = pipeline(
            image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image , expected_image )
| 369 |
'''simple docstring'''
def heaps( arr : list ) -> list:
    """
    Return all permutations of `arr` using Heap's algorithm.
    >>> heaps([1, 2, 3])
    [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]
    """
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []
    def generate( k : int , arr : list ):
        if k == 1:
            res.append(tuple(arr[:] ) )
            return
        generate(k - 1 , arr )
        for i in range(k - 1 ):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1 , arr )
    generate(len(arr ) , arr )
    return res
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
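    # Sanity check (illustrative addition): Heap's algorithm emits exactly
    # len(arr)! permutations.
    import math
    assert len(heaps(arr)) == math.factorial(len(arr))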
| 4 | 0 |
'''simple docstring'''
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class NERTransformer(BaseTransformer ):
    """A training module for NER. See BaseTransformer for the core options."""
    mode = "token-classification"
    def __init__( self , hparams ):
        if type(hparams ) == dict:
            hparams = Namespace(**hparams )
        module = import_module('''tasks''' )
        try:
            token_classification_task_clazz = getattr(module , hparams.task_type )
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f'Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
                f'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' )
        self.labels = self.token_classification_task.get_labels(hparams.labels )
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams , len(self.labels ) , self.mode )
    def forward( self , **inputs ):
        return self.model(**inputs )
    def training_step( self , batch , batch_num ):
        "Compute loss and log."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        outputs = self(**inputs )
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data( self ):
        "Called to initialize data. Use the call to construct features."
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode )
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                logger.info('''Loading features from cached file %s''' , cached_features_file )
                features = torch.load(cached_features_file )
            else:
                logger.info('''Creating features from dataset file at %s''' , args.data_dir )
                examples = self.token_classification_task.read_examples_from_file(args.data_dir , mode )
                # NOTE: the value of sep_token_extra was elided in this copy; False is an
                # assumption here (RoBERTa-style models would use True).
                features = self.token_classification_task.convert_examples_to_features(
                    examples , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                logger.info('''Saving features into cached file %s''' , cached_features_file )
                torch.save(features , cached_features_file )
    def get_dataloader( self , mode , batch_size , shuffle=False ):
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode )
        logger.info('''Loading features from cached file %s''' , cached_features_file )
        features = torch.load(cached_features_file )
        all_input_ids = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        all_attention_mask = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        else:
            all_token_type_ids = torch.tensor([0 for f in features] , dtype=torch.long )
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
        return DataLoader(
            TensorDataset(all_input_ids , all_attention_mask , all_token_type_ids , all_label_ids ) , batch_size=batch_size )
    def validation_step( self , batch , batch_nb ):
        """Compute validation""" ""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        outputs = self(**inputs )
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end( self , outputs ):
        """Aggregate step outputs into a mean loss and seqeval metrics."""
        val_loss_mean = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
        preds = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
        preds = np.argmax(preds , axis=2 )
        out_label_ids = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
        label_map = dict(enumerate(self.labels ) )
        out_label_list = [[] for _ in range(out_label_ids.shape[0] )]
        preds_list = [[] for _ in range(out_label_ids.shape[0] )]
        for i in range(out_label_ids.shape[0] ):
            for j in range(out_label_ids.shape[1] ):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        results = {
            '''val_loss''': val_loss_mean,
            '''accuracy_score''': accuracy_score(out_label_list , preds_list ),
            '''precision''': precision_score(out_label_list , preds_list ),
            '''recall''': recall_score(out_label_list , preds_list ),
            '''f1''': f1_score(out_label_list , preds_list ),
        }
        ret = dict(results.items() )
        ret['''log'''] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end( self , outputs ):
        ret, preds, targets = self._eval_end(outputs )
        logs = ret['''log''']
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end( self , outputs ):
        ret, predictions, targets = self._eval_end(outputs )
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret['''log''']
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
    def add_model_specific_args( parser , root_dir ):
        BaseTransformer.add_model_specific_args(parser , root_dir )
        parser.add_argument(
            '''--task_type''' , default='''NER''' , type=str , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
        parser.add_argument(
            '''--max_seq_length''' , default=128 , type=int , help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument(
            '''--labels''' , default='''''' , type=str , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
        parser.add_argument(
            '''--gpus''' , default=0 , type=int , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
        parser.add_argument(
            '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
        return parser
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
snake_case__ = NERTransformer.add_model_specific_args(parser, os.getcwd())
snake_case__ = parser.parse_args()
snake_case__ = NERTransformer(args)
snake_case__ = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
snake_case__ = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
snake_case__ = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
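# Example invocation (illustrative; the paths, model name, and script filename
# are placeholders, and generic flags such as --model_name_or_path/--output_dir
# come from add_generic_args/BaseTransformer in `lightning_base`):
#   python <this_script>.py --task_type NER --data_dir ./data --labels ./data/labels.txt \
#       --model_name_or_path bert-base-cased --output_dir ./out --max_seq_length 128 \
#       --do_train --do_predict --gpus 1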
| 370 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
    def test_text_streamer_matches_non_streaming( self ):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        model = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer )
            model.generate(input_ids , max_new_tokens=10 , do_sample=False , streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text , greedy_text )
    def test_iterator_streamer_matches_non_streaming( self ):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        model = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )
        streamer = TextIteratorStreamer(tokenizer )
        generation_kwargs = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
        thread = Thread(target=model.generate , kwargs=generation_kwargs )
        thread.start()
        streamer_text = ''''''
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text , greedy_text )
    def test_text_streamer_skip_prompt( self ):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        model = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0] )
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer , skip_prompt=True )
            model.generate(input_ids , max_new_tokens=10 , do_sample=False , streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text , new_greedy_text )
    def test_text_streamer_decode_kwargs( self ):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('''distilgpt2''' )
        model = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = torch.ones((1, 5) , device=torch_device ).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer , skip_special_tokens=True )
            model.generate(input_ids , max_new_tokens=1 , do_sample=False , streamer=streamer )
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text , return_tensors='''pt''' )
        self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
    def test_iterator_streamer_timeout( self ):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        model = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        streamer = TextIteratorStreamer(tokenizer , timeout=0.001 )
        generation_kwargs = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
        thread = Thread(target=model.generate , kwargs=generation_kwargs )
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty ):
            streamer_text = ''''''
            for new_text in streamer:
                streamer_text += new_text
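# Minimal non-test usage sketch (illustrative addition; the model name and
# token budget are placeholders):
#
#   tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
#   model = AutoModelForCausalLM.from_pretrained("distilgpt2")
#   inputs = tokenizer("An example prompt", return_tensors="pt")
#   streamer = TextStreamer(tokenizer, skip_prompt=True)
#   model.generate(**inputs, max_new_tokens=20, streamer=streamer)  # prints text as it is generated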
| 4 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def rename_key( state_dict , old , new ):
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v( state_dict , is_panoptic=False ):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint( model_name , pytorch_dump_folder_path ):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = '''huggingface/label-files'''
        filename = '''coco-detection-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
    image_processor = ConditionalDetrImageProcessor(format=format )
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img , return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    logger.info(f'Converting model {model_name}...' )
    # load original model from torch hub
    conditional_detr = torch.hub.load('''DeppMeng/ConditionalDETR''' , model_name , pretrained=True ).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = '''conditional_detr.''' + src
        rename_key(state_dict , src , dest )
    state_dict = rename_backbone_keys(state_dict )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict , is_panoptic=is_panoptic )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = '''conditional_detr.model.''' if is_panoptic else '''model.'''
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('''conditional_detr''' )
                and not key.startswith('''class_labels_classifier''' )
                and not key.startswith('''bbox_predictor''' )
            ):
                val = state_dict.pop(key )
                # NOTE: the destination keys of this remapping were elided in this copy;
                # the pattern below follows the analogous DETR conversion script.
                state_dict['''conditional_detr.model''' + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key )
                state_dict['''conditional_detr.''' + key] = val
            elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
                continue
            else:
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
        else:
            if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config ) if is_panoptic else ConditionalDetrForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    model.push_to_hub(repo_id=model_name , organization='''DepuMeng''' , commit_message='''Add model''' )
    # verify our conversion
    original_outputs = conditional_detr(pixel_values )
    outputs = model(pixel_values )
    assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1e-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1e-4 )
    # Save model and image processor
    logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
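# Example invocation (illustrative; the script filename and dump path are
# placeholders):
#   python <this_script>.py --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50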
| 371 |
'''simple docstring'''
import heapq
def greedy_min_vertex_cover( graph : dict ) -> set[int]:
    """
    Greedy APX algorithm for the minimum vertex cover problem.
    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue: list[list] = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue , [-1 * len(value ), (key, value)] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
| 4 | 0 |
'''simple docstring'''
def solution( n : int = 1000 ) -> int:
    """Return the index of the first term in the Fibonacci sequence to contain n digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f ):
            i += 1
        if i == n:
            break
    return index
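# Known small case (illustrative check added here): the first Fibonacci term
# with 3 digits is the 12th term (144), so solution(3) should return 12.
assert solution(3) == 12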
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 350 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key( state_dict , old , new ):
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v( state_dict ):
    prefix = ''''''
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[f'decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[f'decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[f'decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[f'decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[f'decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
        in_proj_bias_cross_attn = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f'decoder.layers.{i}.encoder_attn.q_proj.weight'] = in_proj_weight_cross_attn[:256, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.q_proj.bias'] = in_proj_bias_cross_attn[:256]
        state_dict[f'decoder.layers.{i}.encoder_attn.k_proj.weight'] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.k_proj.bias'] = in_proj_bias_cross_attn[256:512]
        state_dict[f'decoder.layers.{i}.encoder_attn.v_proj.weight'] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.v_proj.bias'] = in_proj_bias_cross_attn[-256:]
def resize( image , checkpoint_url ):
    width, height = image.size
    current_max_size = max(width , height )
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
    return resized_image
def normalize( image ):
    image = F.to_tensor(image )
    image = F.normalize(image , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub ):
    logger.info('''Converting model...''' )
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    state_dict = rename_backbone_keys(state_dict )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = '''model.'''
    for key in state_dict.copy().keys():
        if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
            val = state_dict.pop(key )
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: '''table''', 1: '''table rotated'''}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: '''table''',
            1: '''table column''',
            2: '''table row''',
            3: '''table column header''',
            4: '''table projected row header''',
            5: '''table spanning cell''',
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format='''coco_detection''' , max_size=800 if '''detection''' in checkpoint_url else 1000 )
    model = TableTransformerForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    # verify our conversion
    filename = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
    file_path = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=filename )
    image = Image.open(file_path ).convert('''RGB''' )
    pixel_values = normalize(resize(image , checkpoint_url ) ).unsqueeze(0 )
    outputs = model(pixel_values )
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes , atol=1e-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model to HF hub
        logger.info('''Pushing model to the hub...''' )
        model_name = (
            '''microsoft/table-transformer-detection'''
            if '''detection''' in checkpoint_url
            else '''microsoft/table-transformer-structure-recognition'''
        )
        model.push_to_hub(model_name )
        image_processor.push_to_hub(model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
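# Example invocation (illustrative; the script filename and dump path are
# placeholders):
#   python <this_script>.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection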
| 4 | 0 |
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
    def test_get_test_to_tester_mapping( self ):
        """simple docstring"""
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE )
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE )
        EXPECTED_BERT_MAPPING = {'''BertModelTest''': '''BertModelTester'''}
        EXPECTED_BLIP_MAPPING = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping ) , EXPECTED_BERT_MAPPING )
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping ) , EXPECTED_BLIP_MAPPING )
    def test_get_model_to_test_mapping( self ):
        """simple docstring"""
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE )
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE )
        EXPECTED_BERT_MAPPING = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
        EXPECTED_BLIP_MAPPING = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping ) , EXPECTED_BERT_MAPPING )
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping ) , EXPECTED_BLIP_MAPPING )
    def test_get_model_to_tester_mapping( self ):
        """simple docstring"""
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE )
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE )
        EXPECTED_BERT_MAPPING = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
        EXPECTED_BLIP_MAPPING = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping ) , EXPECTED_BERT_MAPPING )
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping ) , EXPECTED_BLIP_MAPPING )
| 351 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
snake_case__ = logging.getLogger(__name__)
@dataclass(frozen=True )
class InputExample:
    """A single training/test example for the HANS dataset."""
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True )
class InputFeatures:
    """A single set of features for one HANS example."""
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class HansDataset(Dataset ):
    """simple docstring"""
    features: List[InputFeatures]
    def __init__( self , data_dir : str , tokenizer : PreTrainedTokenizer , task : str , max_seq_length : Optional[int] = None , overwrite_cache=False , evaluate : bool = False , ):
        """simple docstring"""
        processor = hans_processors[task]()
        cached_features_file = os.path.join(
            data_dir , '''cached_{}_{}_{}_{}'''.format(
                '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(max_seq_length ) , task , ) , )
        label_list = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '''.lock'''
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not overwrite_cache:
                logger.info(f'Loading features from cached file {cached_features_file}' )
                self.features = torch.load(cached_features_file )
            else:
                logger.info(f'Creating features from dataset file at {data_dir}' )
                examples = (
                    processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
                )
                logger.info('''Training examples: %s''' , len(examples ) )
                self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
                logger.info('''Saving features into cached file %s''' , cached_features_file )
                torch.save(self.features , cached_features_file )
def __len__( self : List[str] ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : List[str] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
return self.features[i]
    def get_labels( self ):
"""simple docstring"""
return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
    """simple docstring"""
    features: List[InputFeatures]
    def __init__( self , data_dir : str , tokenizer : PreTrainedTokenizer , task : str , max_seq_length : Optional[int] = 128 , overwrite_cache=False , evaluate : bool = False , ):
        """simple docstring"""
        processor = hans_processors[task]()
        label_list = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        self.examples = processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
        self.features = hans_convert_examples_to_features(self.examples , label_list , max_seq_length , tokenizer )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 10000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(_lowerCamelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
A_ : List[Any] = tf.data.Dataset.from_generator(
gen , (
    {
        '''example_id''': tf.int64,
        '''input_ids''': tf.int64,
        '''attention_mask''': tf.int64,
        '''token_type_ids''': tf.int64,
    },
    tf.int64,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _a ( self : Any ):
"""simple docstring"""
return self.dataset
def __len__( self : Dict ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : Optional[int] , _lowerCamelCase : List[str] ):
"""simple docstring"""
return self.features[i]
def _a ( self : Tuple ):
"""simple docstring"""
return self.label_list
class HansProcessor (DataProcessor ):
"""simple docstring"""
def get_train_examples( self , data_dir ):
    """simple docstring"""
    return self._create_examples(self._read_tsv(os.path.join(data_dir , '''heuristics_train_set.txt''' ) ) , '''train''' )

def get_dev_examples( self , data_dir ):
    """simple docstring"""
    return self._create_examples(self._read_tsv(os.path.join(data_dir , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def get_labels( self ):
"""simple docstring"""
return ["contradiction", "entailment", "neutral"]
def _create_examples( self , lines , set_type ):
    """simple docstring"""
    examples = []
    for i, line in enumerate(lines ):
        if i == 0:
            continue
        guid = '''%s-%s''' % (set_type, line[0])
        text_a = line[5]
        text_b = line[6]
        label = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
        pairID = line[0]
        examples.append(InputExample(guid=guid , text_a=text_a , text_b=text_b , label=label , pairID=pairID ) )
    return examples
def hans_convert_examples_to_features( examples : List[InputExample] , label_list : List[str] , max_length : int , tokenizer : PreTrainedTokenizer , ):
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc='''convert examples to features''' ):
        if ex_index % 10000 == 0:
            logger.info('''Writing example %d''' % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding='''max_length''' , truncation=True , return_overflowing_tokens=True , )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f'guid: {example}' )
logger.info(f'features: {features[i]}' )
return features
snake_case__ = {
"""hans""": 3,
}
hans_processors = {
"""hans""": HansProcessor,
}
| 4 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
def rename_keys( state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith('''module.encoder''' ):
            key = key.replace('''module.encoder''' , '''glpn.encoder''' )
        if key.startswith('''module.decoder''' ):
            key = key.replace('''module.decoder''' , '''decoder.stages''' )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('''patch_embed''' ) + len('''patch_embed''' )]
            key = key.replace(f'patch_embed{idx}' , f'patch_embeddings.{int(idx )-1}' )
        if "norm" in key:
            key = key.replace('''norm''' , '''layer_norm''' )
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('''glpn.encoder.layer_norm''' ) + len('''glpn.encoder.layer_norm''' )]
            key = key.replace(f'layer_norm{idx}' , f'layer_norm.{int(idx )-1}' )
        if "layer_norm1" in key:
            key = key.replace('''layer_norm1''' , '''layer_norm_1''' )
        if "layer_norm2" in key:
            key = key.replace('''layer_norm2''' , '''layer_norm_2''' )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('''block''' ) + len('''block''' )]
            key = key.replace(f'block{idx}' , f'block.{int(idx )-1}' )
        if "attn.q" in key:
            key = key.replace('''attn.q''' , '''attention.self.query''' )
        if "attn.proj" in key:
            key = key.replace('''attn.proj''' , '''attention.output.dense''' )
        if "attn" in key:
            key = key.replace('''attn''' , '''attention.self''' )
        if "fc1" in key:
            key = key.replace('''fc1''' , '''dense1''' )
        if "fc2" in key:
            key = key.replace('''fc2''' , '''dense2''' )
        if "linear_pred" in key:
            key = key.replace('''linear_pred''' , '''classifier''' )
        if "linear_fuse" in key:
            key = key.replace('''linear_fuse.conv''' , '''linear_fuse''' )
            key = key.replace('''linear_fuse.bn''' , '''batch_norm''' )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('''linear_c''' ) + len('''linear_c''' )]
            key = key.replace(f'linear_c{idx}' , f'linear_c.{int(idx )-1}' )
        if "bot_conv" in key:
            key = key.replace('''bot_conv''' , '''0.convolution''' )
        if "skip_conv1" in key:
            key = key.replace('''skip_conv1''' , '''1.convolution''' )
        if "skip_conv2" in key:
            key = key.replace('''skip_conv2''' , '''2.convolution''' )
        if "fusion1" in key:
            key = key.replace('''fusion1''' , '''1.fusion''' )
        if "fusion2" in key:
            key = key.replace('''fusion2''' , '''2.fusion''' )
        if "fusion3" in key:
            key = key.replace('''fusion3''' , '''3.fusion''' )
        if "fusion" in key and "conv" in key:
            key = key.replace('''conv''' , '''convolutional_layer''' )
        if key.startswith('''module.last_layer_depth''' ):
            key = key.replace('''module.last_layer_depth''' , '''head.head''' )
        new_state_dict[key] = value
    return new_state_dict
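# Illustrative trace (added; the checkpoint key below is hypothetical):
#   'module.encoder.patch_embed1.proj.weight'
#     -> 'glpn.encoder.patch_embed1.proj.weight'            (prefix swap)
#     -> 'glpn.encoder.patch_embeddings.0.proj.weight'      (1-based -> 0-based index)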
def read_in_k_v( state_dict , config ):
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' )
            kv_bias = state_dict.pop(f'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' )
            # next, add keys and values (in that order) to the state dict
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.key.weight'] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.key.bias'] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.value.weight'] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.value.bias'] = kv_bias[config.hidden_sizes[i] :]
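# Illustrative shape note (added): for a stage with hidden size h, the fused
# kv.weight has shape (2 * h, h); rows [:h] become the key projection and rows
# [h:] the value projection, matching the slicing above.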
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_glpn_checkpoint( checkpoint_path , pytorch_dump_folder_path , push_to_hub=False , model_name=None ):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors='''pt''' ).pixel_values
    logger.info('''Converting model...''' )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device('''cpu''' ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # key and value matrices need special treatment
    read_in_k_v(state_dict , config )
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    predicted_depth = outputs.predicted_depth
# verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
        else:
            raise ValueError(f'Unknown model name: {model_name}' )
        expected_shape = torch.Size([1, 480, 640] )
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3] , expected_slice , atol=1e-4 )
        print('''Looks ok!''' )
# finally, push to hub if required
if push_to_hub:
logger.info('''Pushing model and image processor to the hub...''' )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=True , )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
snake_case__ = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 352 |
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
snake_case__ = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig (datasets.BuilderConfig ):
"""simple docstring"""
    features : Optional[datasets.Features] = None
    encoding : str = "utf-8"
    encoding_errors : Optional[str] = None
    field : Optional[str] = None
    use_threads : bool = True  # deprecated
    block_size : Optional[int] = None  # deprecated
    chunksize : int = 10 << 20  # 10MB
    newlines_in_values : Optional[bool] = None
class UpperCamelCase_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
    BUILDER_CONFIG_CLASS = JsonConfig
def _info( self ):
"""simple docstring"""
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
self.config.chunksize = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
def _split_generators( self , dl_manager ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
data_files = dl_manager.download_and_extract(self.config.data_files )
if isinstance(data_files , (str, list, tuple) ):
    files = data_files
    if isinstance(files , str ):
        files = [files]
    files = [dl_manager.iter_files(file ) for file in files]
    return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
splits = []
for split_name, files in data_files.items():
    if isinstance(files , str ):
        files = [files]
    files = [dl_manager.iter_files(file ) for file in files]
    splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'''files''': files} ) )
return splits
def _cast_table( self , pa_table : pa.Table ):
"""simple docstring"""
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
type = self.config.features.arrow_schema.field(column_name ).type
pa_table = pa_table.append_column(column_name , pa.array([None] * len(pa_table ) , type=type ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table , self.config.features.arrow_schema )
return pa_table
def _generate_tables( self , files ):
"""simple docstring"""
for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
    dataset = json.load(f )
# We keep only the field we are interested in
dataset = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(dataset , (list, tuple) ):
    keys = set().union(*[row.keys() for row in dataset] )
    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
else:
    mapping = dataset
pa_table = pa.Table.from_pydict(mapping )
yield file_idx, self._cast_table(pa_table )
# If the file has one json object per line
else:
with open(file , '''rb''' ) as f:
batch_idx = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
A_ : int = max(self.config.chunksize // 32 , 16 << 10 )
encoding_errors = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
batch = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(f )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
batch = batch.decode(self.config.encoding , errors=encoding_errors ).encode('''utf-8''' )
try:
while True:
try:
pa_table = paj.read_json(
    io.BytesIO(batch ) , read_options=paj.ReadOptions(block_size=block_size ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
    isinstance(e , pa.ArrowInvalid )
    and "straddling" not in str(e )
    or block_size > len(batch )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f'Batch of {len(_lowerCamelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
dataset = json.load(f )
except json.JSONDecodeError:
logger.error(f'Failed to read file \'{file}\' with error {type(e )}: {e}' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(dataset , list ):  # list is the only sequence type supported in JSON
    try:
        keys = set().union(*[row.keys() for row in dataset] )
        mapping = {col: [row.get(col ) for row in dataset] for col in keys}
        pa_table = pa.Table.from_pydict(mapping )
    except (pa.ArrowInvalid, AttributeError) as e:
        logger.error(f'Failed to read file \'{file}\' with error {type(e )}: {e}' )
        raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None
    yield file_idx, self._cast_table(pa_table )
break
else:
logger.error(f'Failed to read file \'{file}\' with error {type(e )}: {e}' )
raise ValueError(
f'Not able to read records in the JSON file at {file}. '
f'You should probably indicate the field of the JSON file containing your records. '
f'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '
f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(pa_table )
batch_idx += 1
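# Illustrative sketch (added, self-contained; `parse` is a hypothetical callable):
# the retry loop above keeps doubling the pyarrow block size until a chunk
# parses, and only re-raises once growing further cannot help. In miniature:
def parse_with_growing_block(buf: bytes , parse , block_size: int = 16 << 10 ):
    while True:
        try:
            return parse(buf , block_size )
        except ValueError:
            if block_size > len(buf ):
                raise  # a bigger block cannot fix this; surface the real error
            block_size *= 2  # block too small for one record: retry larger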
| 4 | 0 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
snake_case__ = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig (folder_based_builder.FolderBasedBuilderConfig ):
"""simple docstring"""
    drop_labels : bool = None
    drop_metadata : bool = None
class AudioFolder (folder_based_builder.FolderBasedBuilder ):
"""simple docstring"""
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column='audio', label_column='label' )
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
| 353 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class UpperCamelCase_ (a__, a__ ):
"""simple docstring"""
    model_type = 'swin'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
def __init__( self : Any , _lowerCamelCase : Optional[Any]=224 , _lowerCamelCase : List[str]=4 , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Tuple=96 , _lowerCamelCase : List[Any]=[2, 2, 6, 2] , _lowerCamelCase : List[str]=[3, 6, 12, 24] , _lowerCamelCase : List[Any]=7 , _lowerCamelCase : Optional[int]=4.0 , _lowerCamelCase : List[str]=True , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Any=0.0 , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : Tuple=False , _lowerCamelCase : Dict=0.02 , _lowerCamelCase : Optional[Any]=1E-5 , _lowerCamelCase : Any=32 , _lowerCamelCase : Tuple=None , _lowerCamelCase : Any=None , **_lowerCamelCase : str , ):
"""simple docstring"""
super().__init__(**_lowerCamelCase )
A_ : Optional[int] = image_size
A_ : Optional[int] = patch_size
A_ : Optional[int] = num_channels
A_ : Any = embed_dim
A_ : List[Any] = depths
A_ : Any = len(_lowerCamelCase )
A_ : List[Any] = num_heads
A_ : Tuple = window_size
A_ : Tuple = mlp_ratio
A_ : Dict = qkv_bias
A_ : List[str] = hidden_dropout_prob
A_ : List[str] = attention_probs_dropout_prob
A_ : Any = drop_path_rate
A_ : List[Any] = hidden_act
A_ : Tuple = use_absolute_embeddings
A_ : int = layer_norm_eps
A_ : Optional[Any] = initializer_range
A_ : Union[str, Any] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A_ : str = int(embed_dim * 2 ** (len(_lowerCamelCase ) - 1) )
A_ : str = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(_lowerCamelCase ) + 1 )]
A_ ,A_ : Optional[Any] = get_aligned_output_features_output_indices(
out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names )
class UpperCamelCase_ (a__ ):
"""simple docstring"""
    torch_onnx_minimum_version = version.parse('1.11' )
@property
def _a ( self : str ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return 1E-4
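# Worked example (added): with the defaults above, embed_dim=96 and
# depths=[2, 2, 6, 2], so the channel dimension after the last stage is
# 96 * 2 ** (len(depths) - 1) == 96 * 8 == 768, which is the value stored
# in `hidden_size`.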
| 4 | 0 |
'''simple docstring'''
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class UpperCamelCase_ :
"""simple docstring"""
    def __init__( self , scheduler , optimizers , step_with_optimizer : bool = True , split_batches : bool = False ):
        """simple docstring"""
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers , (list, tuple) ) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step( self , *args , **kwargs ):
        """simple docstring"""
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args , **kwargs )
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args , **kwargs )
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes ):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler , '''total_steps''' ):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args , **kwargs )
                else:
                    self.scheduler.step(*args , **kwargs )

    def get_last_lr( self ):
        """simple docstring"""
        return self.scheduler.get_last_lr()

    def state_dict( self ):
        """simple docstring"""
        return self.scheduler.state_dict()

    def load_state_dict( self , state_dict ):
        """simple docstring"""
        self.scheduler.load_state_dict(state_dict )

    def get_lr( self ):
        """simple docstring"""
        return self.scheduler.get_lr()

    def print_lr( self , *args , **kwargs ):
        """simple docstring"""
        return self.scheduler.print_lr(*args , **kwargs )
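# Illustrative sketch (added, self-contained; the class name is not part of
# accelerate): the core rule of the wrapper above -- never advance the LR
# schedule while gradients are still accumulating or an optimizer skipped its step.
class SkipAwareScheduler:
    def __init__(self , scheduler , optimizers ):
        self.scheduler = scheduler
        self.optimizers = optimizers

    def step(self , sync_gradients: bool ):
        if not sync_gradients:
            return  # still accumulating: the schedule must not move
        if any(getattr(opt , '''step_was_skipped''' , False ) for opt in self.optimizers ):
            return  # e.g. inf/nan gradients caused the optimizer step to be skipped
        self.scheduler.step()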
| 354 |
'''simple docstring'''
from __future__ import annotations
def two_pointer( nums : list[int] , target : int ) -> list[int]:
    i = 0
    j = len(nums ) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
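# Illustrative alternative (added, self-contained): the scan above requires
# `nums` to be sorted in ascending order. For unsorted input, a one-pass hash
# map keeps the original indices instead:
def two_sum_hashmap( nums : list[int] , target : int ) -> list[int]:
    seen: dict[int, int] = {}
    for i, value in enumerate(nums ):
        if target - value in seen:
            return [seen[target - value], i]
        seen[value] = i
    return []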
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{two_pointer([2, 7, 11, 15], 9) = }')
| 4 | 0 |
'''simple docstring'''
import numpy as np
snake_case__ = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class UpperCamelCase_ :
"""simple docstring"""
    def __init__( self ):
        """simple docstring"""
        self.SQUARE = np.array(snake_case__ )

    def letter_to_numbers( self , letter : str ):
        """simple docstring"""
        index_a , index_b = np.where(letter == self.SQUARE )
        indexes = np.concatenate([index_a + 1, index_b + 1] )
        return indexes

    def numbers_to_letter( self , index_a : int , index_b : int ):
        """simple docstring"""
        letter = self.SQUARE[index_a - 1, index_b - 1]
        return letter

    def encode( self , message : str ):
        """simple docstring"""
        message = message.lower()
        message = message.replace(''' ''' , '''''' )
        message = message.replace('''j''' , '''i''' )
        first_step = np.empty((2, len(message )) )
        for letter_index in range(len(message ) ):
            numbers = self.letter_to_numbers(message[letter_index] )
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message ) )
        encoded_message = ''''''
        for numbers_index in range(len(message ) ):
            index_a = int(second_step[numbers_index * 2] )
            index_b = int(second_step[(numbers_index * 2) + 1] )
            letter = self.numbers_to_letter(index_a , index_b )
            encoded_message = encoded_message + letter
        return encoded_message

    def decode( self , message : str ):
        """simple docstring"""
        message = message.lower()
        message = message.replace(''' ''' , '''''' )
        first_step = np.empty(2 * len(message ) )
        for letter_index in range(len(message ) ):
            numbers = self.letter_to_numbers(message[letter_index] )
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message )) )
        decoded_message = ''''''
        for numbers_index in range(len(message ) ):
            index_a = int(second_step[0, numbers_index] )
            index_b = int(second_step[1, numbers_index] )
            letter = self.numbers_to_letter(index_a , index_b )
            decoded_message = decoded_message + letter
        return decoded_message
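if __name__ == "__main__":
    # Illustrative round-trip (added): encode/decode invert each other for
    # messages over the 25-letter square ('j' is folded into 'i' on encode).
    cipher = UpperCamelCase_()
    assert cipher.decode(cipher.encode('''testmessage''' ) ) == '''testmessage'''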
| 355 |
'''simple docstring'''
def valid_coloring( neighbours : list[int] , colored_vertices : list[int] , color : int ) -> bool:
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )


def util_color( graph : list[list[int]] , max_colors : int , colored_vertices : list[int] , index : int ) -> bool:
    # Base Case
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color( graph : list[list[int]] , max_colors : int ) -> list[int]:
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
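if __name__ == "__main__":
    # Illustrative check (added): a triangle graph needs three colors, so the
    # backtracking search succeeds with max_colors=3 and fails with 2.
    triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
    assert color(triangle , 3 ) == [0, 1, 2]
    assert color(triangle , 2 ) == []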
| 4 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
snake_case__ = {
"configuration_efficientnet": [
"EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientNetConfig",
"EfficientNetOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
"EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientNetForImageClassification",
"EfficientNetModel",
"EfficientNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
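# Illustrative sketch (added, self-contained): the optional-dependency guard
# above in miniature -- a name is only registered when its backend imports:
#
#   import importlib.util
#   if importlib.util.find_spec("torch") is not None:
#       _import_structure["modeling_efficientnet"] = ["EfficientNetModel"]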
| 356 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation( cells : list[list[int]] ) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells ) ):
        next_generation_row = []
        for j in range(len(cells[i] ) ):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i] ) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells ) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells ) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells ) - 1 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1 )
            else:
                next_generation_row.append(0 )
        next_generation.append(next_generation_row )
    return next_generation
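# Illustrative helper (added, self-contained): the survival rule above condensed
# into a single predicate -- a cell lives next step iff it has exactly three
# live neighbours, or it is currently alive with exactly two.
def next_state( alive : bool , neighbour_count : int ) -> bool:
    return neighbour_count == 3 or (alive and neighbour_count == 2)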
def generate_images( cells : list[list[int]] , frames : int ) -> list[Image.Image]:
    images = []
    for _ in range(frames ):
        # Create output image
        img = Image.new('''RGB''' , (len(cells[0] ), len(cells )) )
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells ) ):
            for y in range(len(cells[0] ) ):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img )
        cells = new_generation(cells )
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save("""out.gif""", save_all=True, append_images=images[1:])
| 4 | 0 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(R"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(R"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(R"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(R"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(R"""\[([^\]]+)\]""")
def get_indent( line : str ) -> str:
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks( code : str , indent_level : str = "" , start_prompt : str = None , end_prompt : str = None ):
    index = 0
    lines = code.split('''\n''' )
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt ):
            index += 1
        blocks = ['''\n'''.join(lines[:index] )]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines ) and (end_prompt is None or not lines[index].startswith(end_prompt )):
        if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
            if len(current_block ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
                current_block.append(lines[index] )
                blocks.append('''\n'''.join(current_block ) )
                if index < len(lines ) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('''\n'''.join(current_block ) )
                current_block = [lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block ) > 0:
        blocks.append('''\n'''.join(current_block ) )
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines ):
        blocks.append('''\n'''.join(lines[index:] ) )
    return blocks
def ignore_underscore( key ):
    def _inner(x ):
        return key(x ).lower().replace('''_''' , '''''' )
    return _inner
def sort_objects( objects , key=None ):
    # If no key is provided, we use a noop.
    def noop(x ):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key1 = ignore_underscore(key )
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
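# Illustrative example (added): constants sort first, then classes, then
# functions, each bucket case-insensitively and ignoring underscores:
#   sort_objects(["load_file", "MODEL_NAMES", "AutoModel", "CONFIG", "run"])
#   -> ['CONFIG', 'MODEL_NAMES', 'AutoModel', 'load_file', 'run']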
def sort_objects_in_import( import_statement : str ) -> str:
    # This inner function sort imports between [ ].
    def _replace(match ):
        imports = match.groups()[0]
        if "," not in imports:
            return f'[{imports}]'
        keys = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'\"{k}\"' for k in sort_objects(keys )] ) + "]"

    lines = import_statement.split('''\n''' )
    if len(lines ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '''[''' else 1
        keys = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(keys , key=lambda x : x[1] )
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            keys = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1] ) + ", ".join([f'\"{k}\"' for k in sort_objects(keys )] )
        return "\n".join(lines )
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace , import_statement )
        return import_statement
def sort_imports( file : str , check_only : bool = True ):
    with open(file , encoding='''utf-8''' ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(main_blocks ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('''\n''' )
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '''\n'''.join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks ) ):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i] )
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reorderded_blocks.append(block )
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(f'Overwriting {file}.' )
            with open(file , '''w''' , encoding='''utf-8''' ) as f:
                f.write('''\n'''.join(main_blocks ) )
def sort_imports_in_all_inits( check_only : bool = True ):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root , '''__init__.py''' ) , check_only=check_only )
            if result:
                failures = [os.path.join(root , '''__init__.py''' )]
    if len(failures ) > 0:
        raise ValueError(f'Would overwrite {len(failures )} files, run `make style`.' )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 357 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Any = tempfile.mkdtemp()
A_ : List[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
A_ : Tuple = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48145466, 0.4578275, 0.40821073],
'''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
A_ : List[Any] = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Dict , **_lowerCamelCase : Tuple ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Optional[int] , **_lowerCamelCase : Optional[int] ):
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Optional[Any] , **_lowerCamelCase : Tuple ):
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self : int ):
"""simple docstring"""
A_ : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
A_ : Any = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a ( self : int ):
"""simple docstring"""
A_ : Tuple = self.get_tokenizer()
A_ : Tuple = self.get_rust_tokenizer()
A_ : Dict = self.get_image_processor()
A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
A_ : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase )
A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
A_ : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[str] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ : Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
A_ : Tuple = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 )
A_ : List[str] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = self.get_image_processor()
A_ : Any = self.get_tokenizer()
A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Any = self.prepare_image_inputs()
A_ : List[Any] = image_processor(_lowerCamelCase , return_tensors='''np''' )
A_ : str = processor(images=_lowerCamelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self : Dict ):
"""simple docstring"""
A_ : str = self.get_image_processor()
A_ : List[str] = self.get_tokenizer()
A_ : Optional[int] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : int = '''lower newer'''
A_ : str = processor(text=_lowerCamelCase )
A_ : Dict = tokenizer(_lowerCamelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self : str ):
"""simple docstring"""
A_ : Optional[int] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : List[Any] = '''lower newer'''
A_ : Optional[int] = self.prepare_image_inputs()
A_ : List[Any] = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Optional[Any] = self.get_image_processor()
A_ : Optional[int] = self.get_tokenizer()
A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : str = processor.batch_decode(_lowerCamelCase )
A_ : Union[str, Any] = tokenizer.batch_decode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : str = self.get_image_processor()
A_ : Tuple = self.get_tokenizer()
A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : str = '''lower newer'''
A_ : List[str] = self.prepare_image_inputs()
A_ : Tuple = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 4 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class UpperCamelCase_ (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
    model_type = '''camembert'''
def __init__( self : Union[str, Any] , _lowerCamelCase : List[Any]=30522 , _lowerCamelCase : Tuple=768 , _lowerCamelCase : Dict=12 , _lowerCamelCase : Optional[int]=12 , _lowerCamelCase : Union[str, Any]=3072 , _lowerCamelCase : int="gelu" , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : Tuple=0.1 , _lowerCamelCase : Dict=512 , _lowerCamelCase : List[str]=2 , _lowerCamelCase : List[Any]=0.02 , _lowerCamelCase : List[Any]=1E-12 , _lowerCamelCase : Union[str, Any]=1 , _lowerCamelCase : Tuple=0 , _lowerCamelCase : str=2 , _lowerCamelCase : Tuple="absolute" , _lowerCamelCase : int=True , _lowerCamelCase : Dict=None , **_lowerCamelCase : Dict , ):
"""simple docstring"""
super().__init__(pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ , **A__ )
A_ : List[str] = vocab_size
A_ : Dict = hidden_size
A_ : str = num_hidden_layers
A_ : int = num_attention_heads
A_ : Tuple = hidden_act
A_ : Any = intermediate_size
A_ : Tuple = hidden_dropout_prob
A_ : Union[str, Any] = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Any = type_vocab_size
A_ : Optional[int] = initializer_range
A_ : Tuple = layer_norm_eps
A_ : Optional[Any] = position_embedding_type
A_ : int = use_cache
A_ : Tuple = classifier_dropout
class UpperCamelCase_ (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
if self.task == "multiple-choice":
A_ : List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
A_ : Dict = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 358 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = """▁"""
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"""
),
},
"""spm_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"""
)
},
}
MAX_MODEL_INPUT_SIZES = {
    """facebook/s2t-small-librispeech-asr""": 1024,
}
MUSTC_LANGS = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""]
LANGUAGES = {"""mustc""": MUSTC_LANGS}
class UpperCamelCase_ (a__ ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens : List[int] = []
def __init__( self : Optional[int] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : str="<s>" , _lowerCamelCase : Union[str, Any]="</s>" , _lowerCamelCase : Dict="<pad>" , _lowerCamelCase : str="<unk>" , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : int=False , _lowerCamelCase : Any=None , _lowerCamelCase : Any=None , _lowerCamelCase : Optional[Dict[str, Any]] = None , **_lowerCamelCase : Optional[int] , ):
"""simple docstring"""
A_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , do_upper_case=_lowerCamelCase , do_lower_case=_lowerCamelCase , tgt_lang=_lowerCamelCase , lang_codes=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
A_ : Optional[int] = do_upper_case
A_ : Tuple = do_lower_case
A_ : Tuple = load_json(_lowerCamelCase )
A_ : Tuple = {v: k for k, v in self.encoder.items()}
A_ : List[Any] = spm_file
A_ : List[str] = load_spm(_lowerCamelCase , self.sp_model_kwargs )
if lang_codes is not None:
A_ : Any = lang_codes
A_ : Optional[Any] = LANGUAGES[lang_codes]
A_ : Optional[Any] = [f'<lang:{lang}>' for lang in self.langs]
A_ : Union[str, Any] = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs}
A_ : Optional[int] = self.lang_tokens
A_ : int = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
A_ : Dict = {}
@property
def _a ( self : Tuple ):
"""simple docstring"""
return len(self.encoder )
@property
def _a ( self : int ):
"""simple docstring"""
return self._tgt_lang
@tgt_lang.setter
def _a ( self : List[str] , _lowerCamelCase : Any ):
"""simple docstring"""
A_ : int = new_tgt_lang
self.set_tgt_lang_special_tokens(_lowerCamelCase )
def _a ( self : Tuple , _lowerCamelCase : str ):
"""simple docstring"""
A_ : List[str] = self.lang_code_to_id[tgt_lang]
A_ : Optional[Any] = [lang_code_id]
def _a ( self : Optional[Any] , _lowerCamelCase : str ):
"""simple docstring"""
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def _a ( self : List[Any] , _lowerCamelCase : int ):
"""simple docstring"""
return self.encoder.get(_lowerCamelCase , self.encoder[self.unk_token] )
def _a ( self : int , _lowerCamelCase : int ):
"""simple docstring"""
return self.decoder.get(_lowerCamelCase , self.unk_token )
def _a ( self : int , _lowerCamelCase : List[str] ):
"""simple docstring"""
A_ : List[Any] = []
A_ : Any = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
A_ : Union[str, Any] = self.sp_model.decode(_lowerCamelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
A_ : Optional[Any] = []
else:
current_sub_tokens.append(_lowerCamelCase )
A_ : Tuple = self.sp_model.decode(_lowerCamelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def _a ( self : int , _lowerCamelCase : Dict , _lowerCamelCase : Any=None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def _a ( self : List[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
A_ : Tuple = [1] * len(self.prefix_tokens )
A_ : Tuple = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones
def _a ( self : Dict ):
"""simple docstring"""
A_ : Union[str, Any] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = self.__dict__.copy()
A_ : List[Any] = None
return state
def __setstate__( self : List[str] , _lowerCamelCase : Dict ):
"""simple docstring"""
A_ : Dict = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
A_ : Optional[int] = {}
A_ : int = load_spm(self.spm_file , self.sp_model_kwargs )
def _a ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
"""simple docstring"""
A_ : Dict = Path(_lowerCamelCase )
assert save_dir.is_dir(), f'{save_directory} should be a directory'
A_ : Optional[int] = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
A_ : Optional[int] = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , _lowerCamelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _lowerCamelCase )
elif not os.path.isfile(self.spm_file ):
with open(_lowerCamelCase , '''wb''' ) as fi:
A_ : List[str] = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (str(_lowerCamelCase ), str(_lowerCamelCase ))
def load_spm( path : str , sp_model_kwargs : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
return spm
def load_json( path : str ) -> Union[Dict, List]:
    with open(path , '''r''' ) as f:
        return json.load(f )
def save_json( data : Any , path : str ) -> None:
    with open(path , '''w''' ) as f:
        json.dump(data , f , indent=2 )
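# Illustrative example (added; the ids are hypothetical): with a target language
# whose lang token id is 9 and eos_token_id 2, the special-token builder above
# is intended to turn token ids [5, 6] into [9, 5, 6, 2] -- language code
# first, eos last.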
| 4 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase_ (__a, __a, __a, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = StableDiffusionInpaintPipeline
_lowerCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
_lowerCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_lowerCAmelCase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowerCAmelCase = frozenset([] )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a__ , )
A_ : Union[str, Any] = PNDMScheduler(skip_prk_steps=a__ )
torch.manual_seed(0 )
A_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
A_ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
A_ : Dict = CLIPTextModel(a__ )
A_ : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
A_ : Optional[Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _a ( self : Any , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any]=0 ):
"""simple docstring"""
A_ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
A_ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A_ : Tuple = Image.fromarray(np.uinta(a__ ) ).convert('''RGB''' ).resize((64, 64) )
A_ : str = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((64, 64) )
if str(a__ ).startswith('''mps''' ):
A_ : str = torch.manual_seed(a__ )
else:
A_ : List[Any] = torch.Generator(device=a__ ).manual_seed(a__ )
A_ : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : str = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A_ : str = self.get_dummy_components()
A_ : List[Any] = StableDiffusionInpaintPipeline(**a__ )
A_ : Union[str, Any] = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
A_ : Tuple = self.get_dummy_inputs(a__ )
A_ : Any = sd_pipe(**a__ ).images
A_ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : int = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self : List[Any] ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : int ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Tuple ):
"""simple docstring"""
A_ : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
A_ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
A_ : List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''' )
A_ : Optional[Any] = '''stabilityai/stable-diffusion-2-inpainting'''
A_ : Tuple = StableDiffusionInpaintPipeline.from_pretrained(a__ , safety_checker=a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
A_ : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench'''
A_ : Union[str, Any] = torch.manual_seed(0 )
A_ : int = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type='''np''' , )
A_ : List[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def _a ( self : List[str] ):
"""simple docstring"""
A_ : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
A_ : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
A_ : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
A_ : int = '''stabilityai/stable-diffusion-2-inpainting'''
A_ : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(
a__ , torch_dtype=torch.floataa , safety_checker=a__ , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
A_ : List[str] = '''Face of a yellow cat, high resolution, sitting on a park bench'''
A_ : Optional[int] = torch.manual_seed(0 )
A_ : str = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type='''np''' , )
A_ : Union[str, Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def _a ( self : List[Any] ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A_ : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
A_ : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
A_ : int = '''stabilityai/stable-diffusion-2-inpainting'''
A_ : int = PNDMScheduler.from_pretrained(a__ , subfolder='''scheduler''' )
A_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
a__ , safety_checker=a__ , scheduler=a__ , torch_dtype=torch.floataa , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
A_ : Optional[int] = '''Face of a yellow cat, high resolution, sitting on a park bench'''
A_ : int = torch.manual_seed(0 )
A_ : Optional[int] = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , num_inference_steps=2 , output_type='''np''' , )
A_ : int = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 359 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
snake_case__ = sys.version_info >= (3, 10)
def snake_case__ ( lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : str=None ) -> List[Any]:
return field(default_factory=lambda: default , metadata=lowerCamelCase__ )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 4_2
_lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = None
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'titi'
_lowerCAmelCase = 'toto'
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'titi'
_lowerCAmelCase = 'toto'
_lowerCAmelCase = 4_2
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = "toto"
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Optional[int] = BasicEnum(self.foo )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = "toto"
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Optional[Any] = MixedTypeEnum(self.foo )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = None
_lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} )
_lowerCAmelCase = None
_lowerCAmelCase = list_field(default=[] )
_lowerCAmelCase = list_field(default=[] )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = list_field(default=[] )
_lowerCAmelCase = list_field(default=[1, 2, 3] )
_lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
_lowerCAmelCase = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = field()
_lowerCAmelCase = field()
_lowerCAmelCase = field()
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Tuple = BasicEnum(self.required_enum )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = field()
_lowerCAmelCase = None
_lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} )
_lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = None
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = None
_lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} )
_lowerCAmelCase = None
_lowerCAmelCase = list_field(default=[] )
_lowerCAmelCase = list_field(default=[] )
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : List[str] , _lowerCamelCase : argparse.ArgumentParser , _lowerCamelCase : argparse.ArgumentParser ):
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
A_ : Union[str, Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''}
A_ : Optional[Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , _lowerCamelCase ) and yy.get('''choices''' , _lowerCamelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](_lowerCamelCase ) , yy['''type'''](_lowerCamelCase ) )
del xx["type"], yy["type"]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase )
A_ : Optional[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--bar''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--baz''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--flag''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Union[str, Any] = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((A_) ,) : List[str] = parser.parse_args_into_dataclasses(_lowerCamelCase , look_for_args_file=_lowerCamelCase )
self.assertFalse(example.flag )
def _a ( self : Dict ):
"""simple docstring"""
A_ : int = HfArgumentParser(_lowerCamelCase )
A_ : int = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=_lowerCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Dict ):
"""simple docstring"""
A_ : Any = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' )
expected.add_argument('''--baz''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=_lowerCamelCase , dest='''baz''' )
expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase )
A_ : Dict = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_lowerCamelCase )
for dataclass_type in dataclass_types:
A_ : Any = HfArgumentParser(_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : List[Any] = parser.parse_args([] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : Optional[int] = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : Union[str, Any] = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : List[str] = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : List[Any] = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : str = HfArgumentParser(_lowerCamelCase )
A_ : Optional[int] = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : str = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
A_ : List[Any] = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
A_ : int = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
A_ : Dict = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
A_ : Tuple = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
A_ : List[str] = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def _a ( self : Optional[int] ):
"""simple docstring"""
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = "toto"
A_ : List[str] = HfArgumentParser(_lowerCamelCase )
A_ : Tuple = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Tuple = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
A_ : List[str] = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
A_ : int = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
def _a ( self : Dict ):
"""simple docstring"""
A_ : int = HfArgumentParser(_lowerCamelCase )
A_ : List[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_lowerCamelCase )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_lowerCamelCase )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[int] = parser.parse_args([] )
self.assertEqual(
_lowerCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
A_ : str = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(_lowerCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def _a ( self : Dict ):
"""simple docstring"""
A_ : Optional[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=_lowerCamelCase , type=_lowerCamelCase )
expected.add_argument('''--bar''' , default=_lowerCamelCase , type=_lowerCamelCase , help='''help message''' )
expected.add_argument('''--baz''' , default=_lowerCamelCase , type=_lowerCamelCase )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_lowerCamelCase )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_lowerCamelCase )
A_ : Tuple = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_lowerCamelCase )
for dataclass_type in dataclass_types:
A_ : int = HfArgumentParser(_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : List[Any] = parser.parse_args([] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , bar=_lowerCamelCase , baz=_lowerCamelCase , ces=[] , des=[] ) )
A_ : Optional[Any] = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(_lowerCamelCase , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[Any] = HfArgumentParser(_lowerCamelCase )
A_ : Dict = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--required_str''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase )
A_ : List[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , )
expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : List[Any] = HfArgumentParser(_lowerCamelCase )
A_ : Union[str, Any] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
A_ : Optional[int] = parser.parse_dict(_lowerCamelCase )[0]
A_ : str = BasicExample(**_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Any = HfArgumentParser(_lowerCamelCase )
A_ : List[str] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(_lowerCamelCase , parser.parse_dict , _lowerCamelCase , allow_extra_keys=_lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase )
A_ : List[str] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : Tuple = os.path.join(_lowerCamelCase , '''temp_json''' )
os.mkdir(_lowerCamelCase )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase )
            A_ : List[str] = parser.parse_json_file(Path(temp_local_path + '''.json''' ) )[0]
A_ : Optional[Any] = BasicExample(**_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : int ):
"""simple docstring"""
A_ : int = HfArgumentParser(_lowerCamelCase )
A_ : Tuple = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : int = os.path.join(_lowerCamelCase , '''temp_yaml''' )
os.mkdir(_lowerCamelCase )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[Any] = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
A_ : int = BasicExample(**_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = HfArgumentParser(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
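# A minimal usage sketch of the parser exercised above: pass explicit argv-style
# strings and get back one populated dataclass instance per input dataclass. The
# `SketchArgs` dataclass and the values below are illustrative, not part of the tests.
@dataclass
class SketchArgs:
    learning_rate: float = 1e-4
    epochs: int = 3

(sketch_args,) = HfArgumentParser(SketchArgs).parse_args_into_dataclasses(
    ["--learning_rate", "3e-5", "--epochs", "5"]
)
assert sketch_args.epochs == 5 and sketch_args.learning_rate == 3e-5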
| 4 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
snake_case__ = logging.get_logger(__name__)
@dataclass
class UpperCamelCase_ (lowerCamelCase__ ):
"""simple docstring"""
_lowerCAmelCase = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
    def __init__( self : Optional[int] , **kwargs : int ):
"""simple docstring"""
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
A_ : Union[str, Any] = deprecated_arg[3:]
setattr(self , __A , not kwargs.pop(__A ) )
logger.warning(
                    f'{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'
f' {positive_arg}={kwargs[positive_arg]}' )
A_ : int = kwargs.pop('''torchscript''' , self.torchscript )
A_ : Union[str, Any] = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
A_ : str = kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level )
super().__init__(**__A )
_lowerCAmelCase = field(default=lowerCamelCase__, metadata={'help': 'Trace the models using torchscript'} )
_lowerCAmelCase = field(default=lowerCamelCase__, metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
_lowerCAmelCase = field(
default='O1', metadata={
'help': (
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
)
}, )
@cached_property
def _a ( self : Any ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
logger.info('''PyTorch: setting up devices''' )
if not self.cuda:
A_ : Tuple = torch.device('''cpu''' )
A_ : int = 0
elif is_torch_tpu_available():
A_ : int = xm.xla_device()
A_ : List[str] = 0
else:
A_ : Tuple = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
A_ : List[str] = torch.cuda.device_count()
return device, n_gpu
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return is_torch_tpu_available() and self.tpu
@property
def _a ( self : List[Any] ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def _a ( self : Optional[int] ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
return self._setup_devices[0]
@property
def _a ( self : Optional[int] ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
return self._setup_devices[1]
@property
def _a ( self : Dict ):
"""simple docstring"""
return self.n_gpu > 0
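# A self-contained sketch of the `no_*` remapping performed in `__init__` above: each
# deprecated `no_xxx` kwarg is popped and re-stored as `xxx` with its value negated.
# The helper name and the default argument list are illustrative, not part of the class.
def _remap_deprecated_sketch(kwargs, deprecated_args=("no_cuda", "no_tpu")):
    for deprecated_arg in deprecated_args:
        if deprecated_arg in kwargs:
            positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
            kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
    return kwargs

assert _remap_deprecated_sketch({"no_cuda": True}) == {"cuda": False}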
| 360 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
snake_case__ = get_tests_dir("""fixtures""")
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : List[str] ):
"""simple docstring"""
A_ : List[Any] = mock.Mock()
A_ : List[str] = 500
A_ : Tuple = {}
A_ : int = HTTPError
A_ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
A_ : Tuple = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=_lowerCamelCase ) as mock_head:
A_ : List[Any] = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Tuple = ViTImageProcessor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
def _a ( self : Dict ):
"""simple docstring"""
with self.assertRaises(_lowerCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
A_ : Any = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
A_ : Tuple = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' )
self.assertIsNotNone(_lowerCamelCase )
@is_staging_test
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
@classmethod
def _a ( cls : Tuple ):
"""simple docstring"""
A_ : int = TOKEN
HfFolder.save_token(_lowerCamelCase )
@classmethod
def _a ( cls : str ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : Dict = ViTImageProcessor.from_pretrained(_lowerCamelCase )
image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token )
A_ : Optional[int] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_lowerCamelCase , repo_id='''test-image-processor''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
A_ : List[Any] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : int = ViTImageProcessor.from_pretrained(_lowerCamelCase )
image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token )
A_ : List[str] = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_lowerCamelCase , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
A_ : Any = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
def _a ( self : Optional[Any] ):
"""simple docstring"""
CustomImageProcessor.register_for_auto_class()
A_ : Any = CustomImageProcessor.from_pretrained(_lowerCamelCase )
image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , )
A_ : str = AutoImageProcessor.from_pretrained(
f'{USER}/test-dynamic-image-processor' , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
| 4 | 0 |
'''simple docstring'''
import json
import sys
def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : Tuple ) -> Union[str, Any]:
with open(__snake_case , encoding='''utf-8''' ) as f:
A_ : Union[str, Any] = json.load(__snake_case )
A_ : Union[str, Any] = ['''<details>''', '''<summary>Show updated benchmarks!</summary>''', ''' ''']
for benchmark_name in sorted(__snake_case ):
A_ : str = results[benchmark_name]
A_ : Union[str, Any] = benchmark_name.split('''/''' )[-1]
output_md.append(f'### Benchmark: {benchmark_file_name}' )
A_ : Dict = '''| metric |'''
A_ : Any = '''|--------|'''
A_ : Union[str, Any] = '''| new / old (diff) |'''
for metric_name in sorted(__snake_case ):
A_ : Tuple = benchmark_res[metric_name]
A_ : int = metric_vals['''new''']
A_ : Any = metric_vals.get('''old''' , __snake_case )
A_ : Union[str, Any] = metric_vals.get('''diff''' , __snake_case )
A_ : List[str] = f' {new_val:f}' if isinstance(__snake_case , (int, float) ) else '''None'''
if old_val is not None:
val_str += f' / {old_val:f}' if isinstance(__snake_case , (int, float) ) else "None"
if dif_val is not None:
val_str += f' ({dif_val:f})' if isinstance(__snake_case , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append('''</details>''' )
with open(__snake_case , '''w''' , encoding='''utf-8''' ) as f:
f.writelines('''\n'''.join(__snake_case ) )
if __name__ == "__main__":
snake_case__ = sys.argv[1]
snake_case__ = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
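# The input shape consumed above, inferred from the loops: one dict per benchmark file,
# one {"new", "old", "diff"} dict per metric; the file name below is illustrative.
# An input such as
#     {"benchmarks/text.json": {"load_time": {"new": 0.12, "old": 0.15, "diff": -0.03}}}
# renders a "### Benchmark: text.json" heading followed by the one-row table
#     | metric | load_time |
#     |--------|---|
#     | new / old (diff) | 0.120000 / 0.150000 (-0.030000) |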
| 361 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = (
        'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. '
        'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements that should be identified in the segmentation mask. The tool returns the mask.'
)
_lowerCAmelCase = 'CIDAS/clipseg-rd64-refined'
_lowerCAmelCase = 'image_segmenter'
_lowerCAmelCase = CLIPSegForImageSegmentation
_lowerCAmelCase = ['image', 'text']
_lowerCAmelCase = ['image']
def __init__( self : Optional[int] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ['''vision'''] )
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
def _a ( self : List[str] , _lowerCamelCase : "Image" , _lowerCamelCase : str ):
"""simple docstring"""
return self.pre_processor(text=[label] , images=[image] , padding=_lowerCamelCase , return_tensors='''pt''' )
def _a ( self : Union[str, Any] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
with torch.no_grad():
A_ : Optional[int] = self.model(**_lowerCamelCase ).logits
return logits
def _a ( self : List[str] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
A_ : int = outputs.cpu().detach().numpy()
        array[array <= 0] = 0  # non-positive logits fall outside the mask
        array[array > 0] = 1  # positive logits are binarized into the mask
return Image.fromarray((array * 255).astype(np.uinta ) )
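# A minimal usage sketch, kept commented because instantiating the tool downloads the
# CIDAS/clipseg-rd64-refined checkpoint; the image path and label are illustrative.
# The tool is callable and chains encode -> forward -> decode as defined above:
# tool = UpperCamelCase_()
# mask = tool(image=Image.open("cat.png"), label="a cat")
# mask.save("cat_mask.png")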
| 4 | 0 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
snake_case__ = parser.parse_args()
    snake_case__ = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
snake_case__ = CLIPImageProcessor()
snake_case__ = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
snake_case__ = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 362 |
'''simple docstring'''
from collections.abc import Sequence
def snake_case__ ( lowerCamelCase__ : Sequence[float] , lowerCamelCase__ : bool = False ) -> float:
if not arr:
return 0
A_ : Union[str, Any] = 0 if allow_empty_subarrays else float('''-inf''' )
A_ : str = 0.0
for num in arr:
A_ : Any = max(0 if allow_empty_subarrays else num , curr_sum + num )
A_ : Tuple = max(lowerCamelCase__ , lowerCamelCase__ )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
snake_case__ = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'{max_subarray_sum(nums) = }')
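    # A worked trace of the scan above on nums (allow_empty_subarrays=False):
    #   num:      -2   1  -3   4  -1   2   1  -5   4
    #   curr_sum: -2   1  -2   4   3   5   6   1   5
    #   max_sum:  -2   1   1   4   4   5   6   6   6
    # so the printed maximum, 6, comes from the slice [4, -1, 2, 1].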
| 4 | 0 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 363 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'speech_to_text_2'
_lowerCAmelCase = ['past_key_values']
_lowerCAmelCase = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Optional[Any] , _lowerCamelCase : Optional[Any]=10000 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : int=2048 , _lowerCamelCase : Dict=4 , _lowerCamelCase : str=0.0 , _lowerCamelCase : int=True , _lowerCamelCase : int="relu" , _lowerCamelCase : Any=256 , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : int=2 , _lowerCamelCase : List[str]=True , _lowerCamelCase : str=1 , _lowerCamelCase : List[Any]=0 , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : Tuple=1024 , **_lowerCamelCase : int , ):
"""simple docstring"""
A_ : Optional[int] = vocab_size
A_ : Tuple = d_model
A_ : List[str] = decoder_ffn_dim
A_ : str = decoder_layers
A_ : Any = decoder_attention_heads
A_ : int = dropout
A_ : str = attention_dropout
A_ : Optional[int] = activation_dropout
A_ : str = activation_function
A_ : List[Any] = init_std
A_ : Union[str, Any] = decoder_layerdrop
A_ : Any = use_cache
A_ : Optional[Any] = decoder_layers
A_ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True
A_ : Optional[Any] = max_target_positions
super().__init__(
pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , **_lowerCamelCase , )
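# A minimal sketch of what `attribute_map` above buys you, written against the real
# `Speech2Text2Config` keyword names (the signature here is placeholder-renamed):
# reads of `hidden_size` and `num_attention_heads` are transparently redirected to
# `d_model` and `decoder_attention_heads` by the PretrainedConfig machinery.
# config = Speech2Text2Config(d_model=256, decoder_attention_heads=4)
# assert config.hidden_size == 256
# assert config.num_attention_heads == 4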
| 4 | 0 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def snake_case__ ( lowerCamelCase__ : Optional[int] ) -> Union[str, Any]:
A_ : Dict = tmp_path / 'file.csv'
    A_ : str = textwrap.dedent(
        '''\
        header1,header2
        1,2
        10,20
        ''' )
with open(snake_case__ , '''w''' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def snake_case__ ( lowerCamelCase__ : Tuple ) -> int:
A_ : List[Any] = tmp_path / 'malformed_file.csv'
    A_ : Optional[Any] = textwrap.dedent(
        '''\
        header1,header2
        1,2
        10,20,
        ''' )
with open(snake_case__ , '''w''' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def snake_case__ ( lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int] ) -> Union[str, Any]:
A_ : str = tmp_path / 'csv_with_image.csv'
    A_ : Tuple = textwrap.dedent(
        f'''\
        image
        {image_file}
        ''' )
with open(snake_case__ , '''w''' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def snake_case__ ( lowerCamelCase__ : Optional[int] ) -> List[Any]:
A_ : Any = tmp_path / 'csv_with_label.csv'
    A_ : Union[str, Any] = textwrap.dedent(
        '''\
        label
        good
        bad
        good
        ''' )
with open(snake_case__ , '''w''' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
@pytest.fixture
def snake_case__ ( lowerCamelCase__ : List[str] ) -> int:
A_ : Optional[int] = tmp_path / 'csv_with_int_list.csv'
    A_ : List[Any] = textwrap.dedent(
        '''\
        int_list
        1 2 3
        4 5 6
        7 8 9
        ''' )
with open(snake_case__ , '''w''' ) as f:
f.write(snake_case__ )
return str(snake_case__ )
def snake_case__ ( lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any ) -> str:
A_ : Optional[Any] = Csv()
A_ : Optional[int] = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(snake_case__ , match='''Error tokenizing data''' ):
for _ in generator:
pass
assert any(
record.levelname == '''ERROR'''
and '''Failed to read file''' in record.message
and os.path.basename(snake_case__ ) in record.message
for record in caplog.records )
@require_pil
def snake_case__ ( lowerCamelCase__ : Tuple ) -> Optional[int]:
with open(snake_case__ , encoding='''utf-8''' ) as f:
A_ : Union[str, Any] = f.read().splitlines()[1]
A_ : List[str] = Csv(encoding='''utf-8''' , features=Features({'''image''': Image()} ) )
A_ : str = csv._generate_tables([[csv_file_with_image]] )
A_ : Any = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''image''' ).type == Image()()
A_ : Tuple = pa_table.to_pydict()['image']
assert generated_content == [{"path": image_file, "bytes": None}]
def snake_case__ ( lowerCamelCase__ : str ) -> Optional[Any]:
with open(snake_case__ , encoding='''utf-8''' ) as f:
A_ : Optional[int] = f.read().splitlines()[1:]
A_ : List[Any] = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
A_ : Any = csv._generate_tables([[csv_file_with_label]] )
A_ : Tuple = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
A_ : List[Any] = pa_table.to_pydict()['label']
    assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).straint(label ) for label in labels]
def snake_case__ ( lowerCamelCase__ : Dict ) -> Optional[Any]:
    A_ : List[str] = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda x: [int(i) for i in x.split()]} )
A_ : Tuple = csv._generate_tables([[csv_file_with_int_list]] )
A_ : Dict = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
A_ : List[Any] = pa_table.to_pydict()['int_list']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
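# A minimal sketch of the consumption pattern used throughout these tests: the packaged
# `Csv` builder's `_generate_tables` yields (key, pyarrow.Table) pairs, which the tests
# flatten with `pa.concat_tables` before inspecting schema or contents; `path` below is
# illustrative.
# generator = Csv()._generate_tables([[path]])
# pa_table = pa.concat_tables([table for _, table in generator])
# print(pa_table.schema)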
| 364 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'table-transformer'
_lowerCAmelCase = ['past_key_values']
_lowerCAmelCase = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Any , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Dict=None , _lowerCamelCase : int=3 , _lowerCamelCase : Any=100 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : Any=8 , _lowerCamelCase : Dict=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : int=8 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : List[Any]=0.0 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Optional[int]="relu" , _lowerCamelCase : Union[str, Any]=256 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : str=0.02 , _lowerCamelCase : Tuple=1.0 , _lowerCamelCase : Dict=False , _lowerCamelCase : str="sine" , _lowerCamelCase : str="resnet50" , _lowerCamelCase : Any=True , _lowerCamelCase : List[str]=False , _lowerCamelCase : Any=1 , _lowerCamelCase : int=5 , _lowerCamelCase : Tuple=2 , _lowerCamelCase : Optional[int]=1 , _lowerCamelCase : Any=1 , _lowerCamelCase : Dict=5 , _lowerCamelCase : str=2 , _lowerCamelCase : Union[str, Any]=0.1 , **_lowerCamelCase : int , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
A_ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : str = backbone_config.get('''model_type''' )
A_ : Optional[int] = CONFIG_MAPPING[backbone_model_type]
A_ : List[str] = config_class.from_dict(_lowerCamelCase )
# set timm attributes to None
A_ ,A_ ,A_ : Union[str, Any] = None, None, None
A_ : Optional[Any] = use_timm_backbone
A_ : Optional[int] = backbone_config
A_ : Optional[Any] = num_channels
A_ : Dict = num_queries
A_ : str = d_model
A_ : List[str] = encoder_ffn_dim
A_ : int = encoder_layers
A_ : Optional[Any] = encoder_attention_heads
A_ : List[str] = decoder_ffn_dim
A_ : Any = decoder_layers
A_ : List[str] = decoder_attention_heads
A_ : Tuple = dropout
A_ : Optional[Any] = attention_dropout
A_ : Any = activation_dropout
A_ : List[Any] = activation_function
A_ : Dict = init_std
A_ : Any = init_xavier_std
A_ : List[Any] = encoder_layerdrop
A_ : int = decoder_layerdrop
A_ : Any = encoder_layers
A_ : List[str] = auxiliary_loss
A_ : List[Any] = position_embedding_type
A_ : Optional[Any] = backbone
A_ : Tuple = use_pretrained_backbone
A_ : List[Any] = dilation
# Hungarian matcher
A_ : List[str] = class_cost
A_ : str = bbox_cost
A_ : Union[str, Any] = giou_cost
# Loss coefficients
A_ : Any = mask_loss_coefficient
A_ : Optional[int] = dice_loss_coefficient
A_ : Dict = bbox_loss_coefficient
A_ : int = giou_loss_coefficient
A_ : int = eos_coefficient
super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase )
@property
def _a ( self : List[Any] ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def _a ( self : Any ):
"""simple docstring"""
return self.d_model
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = version.parse('1.11' )
@property
def _a ( self : Tuple ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def _a ( self : Optional[int] ):
"""simple docstring"""
return 1E-5
@property
def _a ( self : str ):
"""simple docstring"""
return 12
| 4 | 0 |
'''simple docstring'''
snake_case__ = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 365 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : List[str]=3 , _lowerCamelCase : Any=32 , _lowerCamelCase : Union[str, Any]=3 , _lowerCamelCase : int=10 , _lowerCamelCase : Union[str, Any]=[8, 16, 32, 64] , _lowerCamelCase : Dict=[1, 1, 2, 1] , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Any="relu" , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Dict=["stage2", "stage3", "stage4"] , _lowerCamelCase : Union[str, Any]=[2, 3, 4] , _lowerCamelCase : Tuple=1 , ):
"""simple docstring"""
A_ : List[str] = parent
A_ : List[str] = batch_size
A_ : Union[str, Any] = image_size
A_ : Tuple = num_channels
A_ : Any = embeddings_size
A_ : int = hidden_sizes
A_ : Optional[Any] = depths
A_ : List[Any] = is_training
A_ : Optional[int] = use_labels
A_ : int = hidden_act
A_ : Tuple = num_labels
A_ : Union[str, Any] = scope
A_ : List[Any] = len(_lowerCamelCase )
A_ : Union[str, Any] = out_features
A_ : List[Any] = out_indices
A_ : Dict = num_groups
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Any = ids_tensor([self.batch_size] , self.num_labels )
A_ : Any = self.get_config()
return config, pixel_values, labels
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def _a ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] ):
"""simple docstring"""
A_ : Any = BitModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : int = model(_lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _a ( self : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
A_ : Dict = self.num_labels
A_ : Optional[Any] = BitForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : Any , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : List[Any] ):
"""simple docstring"""
A_ : List[Any] = BitBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : int = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
A_ : Optional[Any] = None
A_ : int = BitBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Optional[int] = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : Union[str, Any] = self.prepare_config_and_inputs()
A_ ,A_ ,A_ : Union[str, Any] = config_and_inputs
A_ : str = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase_ (a__, a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
_lowerCAmelCase = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : List[str] = BitModelTester(self )
A_ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : List[Any] ):
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def _a ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def _a ( self : Any ):
"""simple docstring"""
pass
def _a ( self : List[Any] ):
"""simple docstring"""
A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Dict = model_class(_lowerCamelCase )
A_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : int = [*signature.parameters.keys()]
A_ : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ ,A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : str = model_class(config=_lowerCamelCase )
for name, module in model.named_modules():
if isinstance(_lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
def _a ( self : int ):
"""simple docstring"""
def check_hidden_states_output(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : int ):
A_ : Union[str, Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
A_ : Union[str, Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
A_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ : List[Any] = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Tuple = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A_ : Tuple = layer_type
A_ : Optional[Any] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : List[str] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def _a ( self : Tuple ):
"""simple docstring"""
pass
def _a ( self : str ):
"""simple docstring"""
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def _a ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[Any] = BitModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def snake_case__ ( ) -> Optional[int]:
A_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : List[Any] ):
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def _a ( self : Dict ):
"""simple docstring"""
A_ : Optional[int] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowerCamelCase )
A_ : Union[str, Any] = self.default_image_processor
A_ : Optional[int] = prepare_img()
A_ : int = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
A_ : Union[str, Any] = model(**_lowerCamelCase )
# verify the logits
A_ : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
A_ : Tuple = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) )
@require_torch
class UpperCamelCase_ (BackboneTesterMixin, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (BitBackbone,) if is_torch_available() else ()
_lowerCAmelCase = BitConfig
_lowerCAmelCase = False
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Union[str, Any] = BitModelTester(self )
| 4 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    """simple docstring"""
    # setable values
    num_inference_steps: Optional[int] = None
    schedule: Optional[jnp.ndarray] = None # sigma(t_i)
    timesteps: Optional[jnp.ndarray] = None
    @classmethod
    def create(cls):
        """simple docstring"""
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    """simple docstring"""
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    """simple docstring"""
    @property
    def has_state(self):
        """simple docstring"""
        return True
    @register_to_config
    def __init__(self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50):
        """simple docstring"""
        pass
    def create_state(self):
        """simple docstring"""
        return KarrasVeSchedulerState.create()
    def set_timesteps(self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> KarrasVeSchedulerState:
        """simple docstring"""
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps, schedule=jnp.array(schedule, dtype=jnp.float32), timesteps=timesteps, )
    def add_noise_to_input(self, state: KarrasVeSchedulerState, sample: jnp.ndarray, sigma: float, key):
        """simple docstring"""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, return_dict: bool = True):
        """simple docstring"""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
    def step_correct(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, sample_prev: jnp.ndarray, derivative: jnp.ndarray, return_dict: bool = True):
        """simple docstring"""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
    def add_noise(self, state, original_samples, noise, timesteps):
        """simple docstring"""
        raise NotImplementedError()
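# Editor's sketch of a sampling loop over the functional-state API above (appended; the
# `unet_apply` call, `key`, and `sample` are placeholders for a real trained denoiser):
#   scheduler = FlaxKarrasVeScheduler()
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=10)
#   for t in state.timesteps:
#       sigma = state.schedule[t]
#       sigma_prev = state.schedule[t - 1] if t > 0 else 0.0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)
#       model_output = unet_apply(sample_hat, sigma_hat)  # hypothetical
#       sample = scheduler.step(state, model_output, sigma_hat, sigma_prev, sample_hat).prev_sample
# The schedule lives in KarrasVeSchedulerState rather than on the scheduler object,
# following the Flax convention of keeping all mutable state explicit.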
| 366 |
'''simple docstring'''
import pprint
import requests
snake_case__ = """https://zenquotes.io/api"""
def snake_case__ ( ) -> list:
return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def snake_case__ ( ) -> list:
return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
snake_case__ = random_quotes()
pprint.pprint(response)
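# Each endpoint typically returns a list of quote objects shaped like
# [{"q": "<quote>", "a": "<author>", "h": "<html>"}] (per the public ZenQuotes
# docs, not verified against this file).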
| 4 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    """simple docstring"""
    model_type = """perceiver"""
    def __init__(self, num_latents=256, d_latents=1280, d_model=768, num_blocks=1, num_self_attends_per_block=26, num_self_attention_heads=8, num_cross_attention_heads=8, qk_channels=None, v_channels=None, cross_attention_shape_for_attention="kv", self_attention_widening_factor=1, cross_attention_widening_factor=1, hidden_act="gelu", attention_probs_dropout_prob=0.1, initializer_range=0.02, layer_norm_eps=1E-12, use_query_residual=True, vocab_size=262, max_position_embeddings=2048, image_size=56, train_size=[368, 496], num_frames=16, audio_samples_per_frame=1920, samples_per_patch=16, output_shape=[1, 16, 224, 224], **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    """simple docstring"""
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''inputs''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1E-4
    def generate_dummy_inputs(self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40) -> Mapping[str, Any]:
        """simple docstring"""
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [''' '''.join(['''a'''] ) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework ) )
            inputs['''inputs'''] = inputs.pop('''input_ids''' )
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch )
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width )
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework ) )
            inputs['''inputs'''] = inputs.pop('''pixel_values''' )
            return inputs
        else:
            raise ValueError(
                '''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
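# Editor's sketch of how this OnnxConfig is typically consumed (API per transformers.onnx;
# the checkpoint id is illustrative, not taken from this file):
#   from transformers import PerceiverTokenizer, PerceiverForMaskedLM
#   tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
#   model = PerceiverForMaskedLM.from_pretrained("deepmind/language-perceiver")
#   onnx_config = PerceiverOnnxConfig(model.config)
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)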
| 367 |
'''simple docstring'''
from __future__ import annotations
class IIRFilter:
    """simple docstring"""
    def __init__(self, order: int) -> None:
        """simple docstring"""
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        """simple docstring"""
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f'Expected a_coeffs to have {self.order + 1} elements '
                f'for {self.order}-order filter, got {len(a_coeffs)}'
            )
            raise ValueError(error_msg)
        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f'Expected b_coeffs to have {self.order + 1} elements '
                f'for {self.order}-order filter, got {len(b_coeffs)}'
            )
            raise ValueError(error_msg)
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process(self, sample: float) -> float:
        """simple docstring"""
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
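# Editor's example (appended; not in the original file): a first-order low-pass,
# y[n] = 0.5 * x[n] + 0.5 * y[n-1], written as a_coeffs=[1.0, -0.5], b_coeffs=[0.5, 0.0].
if __name__ == "__main__":
    lowpass = IIRFilter(1)
    lowpass.set_coefficients([1.0, -0.5], [0.5, 0.0])
    print([round(lowpass.process(1.0), 4) for _ in range(5)])
    # step response climbs toward 1.0: [0.5, 0.75, 0.875, 0.9375, 0.9688]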
| 4 | 0 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt""",
    },
    """tokenizer_file""": {
        """Salesforce/codegen-350M-mono""": (
            """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"""
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """Salesforce/codegen-350M-mono""": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = CodeGenTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        """simple docstring"""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        if kwargs.pop('''add_bos_token''' , False ):
            model_id = kwargs.pop('''name_or_path''' , '''''' )
            raise ValueError(
                '''Currently GPT2\'s fast tokenizer does NOT support adding a BOS token.'''
                '''Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'''
                f'`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'
                f'`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'
                '''This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'''
                ''' so that the fast tokenizer works correctly.''' )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def decode(self, token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, truncate_before_pattern: Optional[List[str]] = None, **kwargs) -> str:
        """simple docstring"""
        decoded_text = super().decode(
            token_ids=token_ids , skip_special_tokens=skip_special_tokens , clean_up_tokenization_spaces=clean_up_tokenization_spaces , **kwargs , )
        if truncate_before_pattern is not None and len(truncate_before_pattern ) > 0:
            decoded_text = self.truncate(decoded_text , truncate_before_pattern )
        return decoded_text
    def truncate(self, completion, truncate_before_pattern):
        """simple docstring"""
        def find_re(string, pattern, start_pos):
            m = pattern.search(string , start_pos )
            return m.start() if m else -1
        terminals = [re.compile(pattern , re.MULTILINE ) for pattern in truncate_before_pattern]
        prints = list(re.finditer('''^print''' , completion , re.MULTILINE ) )
        if len(prints ) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer('''^def''' , completion , re.MULTILINE ) )
        if len(defs ) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion , terminal , start_pos ) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos ) > 0:
            return completion[: min(terminals_pos )]
        else:
            return completion
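# Editor's usage sketch for `decode(..., truncate_before_pattern=...)` (checkpoint id and
# patterns are illustrative, not taken from this file):
#   tok = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#   text = tok.decode(generated_ids, truncate_before_pattern=["^#", "^'''", "\n\n\n"])
# Decoding cuts the completion at the earliest match of any pattern, and `truncate`
# additionally stops before a second top-level `print` or `def` statement.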
| 368 |
'''simple docstring'''
class Node:
    """simple docstring"""
    def __init__(self, val):
        """simple docstring"""
        self.val = val
        self.left = None
        self.right = None
    def insert(self, val):
        """simple docstring"""
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val
def inorder(root, res):
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)
def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
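    # expected output: [1, 2, 3, 9, 10, 13, 14]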
| 4 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    """simple docstring"""
    feature_extractor_class = """ClapFeatureExtractor"""
    tokenizer_class = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
    def __init__(self, feature_extractor, tokenizer):
        """simple docstring"""
        super().__init__(feature_extractor , tokenizer )
    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """simple docstring"""
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        if text is None and audios is None:
            raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if audios is not None:
            audio_features = self.feature_extractor(
                audios , sampling_rate=sampling_rate , return_tensors=return_tensors , **kwargs )
        if text is not None and audios is not None:
            encoding['''input_features'''] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features ) , tensor_type=return_tensors )
    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names(self):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
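# Editor's usage sketch (checkpoint id and audio array are illustrative, not from this file):
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=waveform,
#                      sampling_rate=48_000, return_tensors="pt")
#   # -> tokenizer fields (input_ids, attention_mask) plus "input_features"
#   #    contributed by the feature extractor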
| 369 |
'''simple docstring'''
def heaps(arr: list) -> list:
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []
    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)
    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
    print(heaps(arr))
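    # e.g. for input "1,2,3" this prints Heap's ordering of the six permutations:
    # [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]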
| 4 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase_ (lowerCamelCase_, unittest.TestCase ):
"""simple docstring"""
    _lowerCAmelCase = KandinskyV22ControlnetImg2ImgPipeline
_lowerCAmelCase = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
_lowerCAmelCase = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
_lowerCAmelCase = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_lowerCAmelCase = False
@property
def _a ( self : Tuple ):
"""simple docstring"""
return 32
@property
def _a ( self : Optional[int] ):
"""simple docstring"""
return 32
@property
def _a ( self : Optional[int] ):
"""simple docstring"""
return self.time_input_dim
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def _a ( self : List[str] ):
"""simple docstring"""
return 100
@property
def _a ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : Union[str, Any] = {
'''in_channels''': 8,
            # Out channels is double the in channels because the model predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        A_ : Optional[int] = UNet2DConditionModel(**__snake_case )
return model
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _a ( self : int ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : str = VQModel(**self.dummy_movq_kwargs )
return model
def _a ( self : str ):
"""simple docstring"""
A_ : Optional[Any] = self.dummy_unet
A_ : Optional[int] = self.dummy_movq
A_ : Any = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_00_85,
'''beta_end''': 0.0_12,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
A_ : Dict = DDIMScheduler(**__snake_case )
A_ : int = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _a ( self : Tuple , _lowerCamelCase : Optional[int] , _lowerCamelCase : str=0 ):
"""simple docstring"""
A_ : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__snake_case ) ).to(__snake_case )
A_ : Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__snake_case )
# create init_image
A_ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
A_ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        A_ : str = Image.fromarray(np.uint8(__snake_case ) ).convert('''RGB''' ).resize((256, 256) )
# create hint
A_ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
if str(__snake_case ).startswith('''mps''' ):
A_ : List[Any] = torch.manual_seed(__snake_case )
else:
A_ : int = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
A_ : Union[str, Any] = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Tuple = '''cpu'''
A_ : Optional[int] = self.get_dummy_components()
A_ : Optional[Any] = self.pipeline_class(**__snake_case )
A_ : Any = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
A_ : Tuple = pipe(**self.get_dummy_inputs(__snake_case ) )
A_ : Tuple = output.images
A_ : Any = pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
A_ : List[Any] = image[0, -3:, -3:, -1]
A_ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : Union[str, Any] = np.array(
[0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : int ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : int ):
"""simple docstring"""
A_ : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
A_ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
A_ : List[str] = init_image.resize((512, 512) )
A_ : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
A_ : Any = torch.from_numpy(np.array(__snake_case ) ).float() / 255.0
A_ : Optional[Any] = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
A_ : int = '''A robot, 4k photo'''
        A_ : Tuple = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.float16 )
pipe_prior.to(__snake_case )
        A_ : Optional[Any] = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.float16 )
A_ : Any = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
A_ : str = torch.Generator(device='''cpu''' ).manual_seed(0 )
A_ ,A_ : Dict = pipe_prior(
__snake_case , image=__snake_case , strength=0.85 , generator=__snake_case , negative_prompt='''''' , ).to_tuple()
A_ : int = pipeline(
image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , hint=__snake_case , generator=__snake_case , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , )
A_ : Any = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
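# Note on the two-stage flow exercised above: the prior pipeline maps the prompt (and, at
# the given strength, the init image) to CLIP image embeddings, and the controlnet img2img
# pipeline then denoises conditioned on those embeddings plus the depth hint.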
| 370 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Dict ):
"""simple docstring"""
A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : Tuple = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : Dict = -1
A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : Any = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase )
A_ : List[str] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
A_ : List[str] = TextStreamer(_lowerCamelCase )
model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
A_ : Dict = cs.out[:-1]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : List[str] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : Dict = -1
A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : Optional[int] = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase )
A_ : str = tokenizer.decode(greedy_ids[0] )
A_ : int = TextIteratorStreamer(_lowerCamelCase )
A_ : List[Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
A_ : List[Any] = Thread(target=model.generate , kwargs=_lowerCamelCase )
thread.start()
A_ : List[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : int ):
"""simple docstring"""
A_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : List[str] = -1
A_ : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : Tuple = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase )
A_ : Tuple = greedy_ids[:, input_ids.shape[1] :]
A_ : Tuple = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
A_ : Any = TextStreamer(_lowerCamelCase , skip_prompt=_lowerCamelCase )
model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
A_ : Any = cs.out[:-1]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
A_ : Tuple = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowerCamelCase )
A_ : List[Any] = -1
A_ : Union[str, Any] = torch.ones((1, 5) , device=_lowerCamelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
A_ : List[Any] = TextStreamer(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
model.generate(_lowerCamelCase , max_new_tokens=1 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
A_ : List[str] = cs.out[:-1] # Remove the final "\n"
A_ : List[Any] = tokenizer(_lowerCamelCase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : Union[str, Any] = -1
A_ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : List[str] = TextIteratorStreamer(_lowerCamelCase , timeout=0.0_01 )
A_ : str = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
A_ : List[str] = Thread(target=model.generate , kwargs=_lowerCamelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_lowerCamelCase ):
A_ : str = ''''''
for new_text in streamer:
streamer_text += new_text
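# Usage sketch outside the test harness (mirrors the pattern above; "gpt2" is illustrative):
#   tok = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   streamer = TextIteratorStreamer(tok, skip_prompt=True, skip_special_tokens=True)
#   inputs = tok("An increasing sequence: one,", return_tensors="pt")
#   Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer}).start()
#   text = "".join(chunk for chunk in streamer)  # blocks until generation finishes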
| 4 | 0 |
'''simple docstring'''
import math
def perfect_square(num: int) -> bool:
    return math.sqrt(num) * math.sqrt(num) == num
def perfect_square_binary_search(n: int) -> bool:
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
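# Example: perfect_square(16) and perfect_square_binary_search(16) are both True, while 26
# gives False for both; the binary-search variant avoids float rounding, which can make the
# sqrt-based check misreport for very large integers.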
| 371 |
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    queue: list[list] = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(F'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
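    # for the demo graph above this prints "Minimum vertex cover:" followed by {0, 1, 2, 4}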
| 4 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase_ (TokenizerTesterMixin, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = RoCBertTokenizer
_lowerCAmelCase = None
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = filter_non_english
def _a ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
A_ : Optional[Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
A_ : Optional[int] = {}
A_ : int = {}
for i, value in enumerate(_lowerCamelCase ):
A_ : Tuple = i
A_ : Optional[int] = i
A_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
A_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
A_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(_lowerCamelCase , _lowerCamelCase , ensure_ascii=_lowerCamelCase )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(_lowerCamelCase , _lowerCamelCase , ensure_ascii=_lowerCamelCase )
def _a ( self : Any ):
"""simple docstring"""
A_ : Optional[Any] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
A_ : List[str] = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(_lowerCamelCase , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(_lowerCamelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(_lowerCamelCase ) , [5, 6, 2, 5, 7, 8] )
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Optional[Any] = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def _a ( self : Dict ):
"""simple docstring"""
A_ : str = RoCBertBasicTokenizer(do_lower_case=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _a ( self : Any ):
"""simple docstring"""
A_ : str = RoCBertBasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def _a ( self : Any ):
"""simple docstring"""
A_ : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : str = RoCBertBasicTokenizer(do_lower_case=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = RoCBertBasicTokenizer(do_lower_case=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _a ( self : Any ):
"""simple docstring"""
A_ : Tuple = RoCBertBasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : Optional[int] = RoCBertBasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Optional[int] = RoCBertBasicTokenizer(do_lower_case=_lowerCamelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def _a ( self : Any ):
"""simple docstring"""
A_ : int = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
A_ : Union[str, Any] = {}
for i, token in enumerate(_lowerCamelCase ):
A_ : Dict = i
A_ : List[str] = RoCBertWordpieceTokenizer(vocab=_lowerCamelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def _a ( self : int ):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def _a ( self : Tuple ):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def _a ( self : Tuple ):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def _a ( self : int ):
"""simple docstring"""
A_ : Any = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_lowerCamelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
A_ : List[Any] = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(_lowerCamelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def _a ( self : Optional[int] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A_ : List[Any] = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A_ : Any = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
A_ : Dict = tokenizer_r.encode_plus(
_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , add_special_tokens=_lowerCamelCase , )
A_ : int = tokenizer_r.do_lower_case if hasattr(_lowerCamelCase , '''do_lower_case''' ) else False
A_ : int = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def _a ( self : int ):
"""simple docstring"""
A_ : Tuple = ['''的''', '''人''', '''有''']
A_ : Optional[int] = ''''''.join(_lowerCamelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A_ : List[str] = True
A_ : List[Any] = self.tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A_ : str = tokenizer_p.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ : Union[str, Any] = tokenizer_r.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ : str = tokenizer_r.convert_ids_to_tokens(_lowerCamelCase )
A_ : Dict = tokenizer_p.convert_ids_to_tokens(_lowerCamelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
A_ : List[str] = False
A_ : List[Any] = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A_ : int = self.tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A_ : Any = tokenizer_r.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ : Union[str, Any] = tokenizer_p.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ : Optional[int] = tokenizer_r.convert_ids_to_tokens(_lowerCamelCase )
A_ : List[Any] = tokenizer_p.convert_ids_to_tokens(_lowerCamelCase )
# it is expected that only the first Chinese character is not preceded by "##".
A_ : Optional[Any] = [
f'##{token}' if idx != 0 else token for idx, token in enumerate(_lowerCamelCase )
]
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
@slow
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Any = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
A_ : Tuple = tokenizer.encode('''你好''' , add_special_tokens=_lowerCamelCase )
A_ : Dict = tokenizer.encode('''你是谁''' , add_special_tokens=_lowerCamelCase )
A_ : Dict = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase )
A_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Optional[Any] = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
A_ : Dict = '''你好,你是谁'''
A_ : List[Any] = tokenizer.tokenize(_lowerCamelCase )
A_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_lowerCamelCase )
A_ : int = tokenizer.convert_tokens_to_shape_ids(_lowerCamelCase )
A_ : Any = tokenizer.convert_tokens_to_pronunciation_ids(_lowerCamelCase )
A_ : Any = tokenizer.prepare_for_model(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ : Optional[int] = tokenizer.encode_plus(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
| 350 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
snake_case__ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ) -> Optional[Any]:
A_ : Tuple = state_dict.pop(lowerCamelCase__ )
A_ : Optional[Any] = val
def snake_case__ ( lowerCamelCase__ : Dict ) -> Any:
A_ : int = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
A_ : int = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
A_ : List[str] = value
else:
A_ : Optional[int] = value
return new_state_dict
def snake_case__ ( lowerCamelCase__ : Union[str, Any] ) -> Optional[Any]:
A_ : Any = ''''''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
A_ : Tuple = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
A_ : Dict = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
A_ : str = in_proj_weight[:2_5_6, :]
A_ : Optional[Any] = in_proj_bias[:2_5_6]
A_ : Dict = in_proj_weight[2_5_6:5_1_2, :]
A_ : Tuple = in_proj_bias[2_5_6:5_1_2]
A_ : Tuple = in_proj_weight[-2_5_6:, :]
A_ : Optional[int] = in_proj_bias[-2_5_6:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
A_ : Union[str, Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
A_ : Dict = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
A_ : List[str] = in_proj_weight[:2_5_6, :]
A_ : int = in_proj_bias[:2_5_6]
A_ : Any = in_proj_weight[2_5_6:5_1_2, :]
A_ : List[str] = in_proj_bias[2_5_6:5_1_2]
A_ : Union[str, Any] = in_proj_weight[-2_5_6:, :]
A_ : Optional[Any] = in_proj_bias[-2_5_6:]
# read in weights + bias of input projection layer of cross-attention
A_ : Tuple = state_dict.pop(
f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
A_ : Optional[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
A_ : Dict = in_proj_weight_cross_attn[:2_5_6, :]
A_ : Tuple = in_proj_bias_cross_attn[:2_5_6]
A_ : int = in_proj_weight_cross_attn[2_5_6:5_1_2, :]
A_ : List[str] = in_proj_bias_cross_attn[2_5_6:5_1_2]
A_ : Any = in_proj_weight_cross_attn[-2_5_6:, :]
A_ : Any = in_proj_bias_cross_attn[-2_5_6:]
def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ) -> Dict:
A_ ,A_ : int = image.size
A_ : Tuple = max(lowerCamelCase__ , lowerCamelCase__ )
A_ : Optional[Any] = 8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0
A_ : Union[str, Any] = target_max_size / current_max_size
A_ : Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def snake_case__ ( lowerCamelCase__ : Tuple ) -> str:
A_ : Any = F.to_tensor(lowerCamelCase__ )
A_ : Optional[Any] = F.normalize(lowerCamelCase__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int ) -> str:
logger.info('''Converting model...''' )
# load original state dict
A_ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location='''cpu''' )
# rename keys
for src, dest in rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
A_ : str = rename_backbone_keys(lowerCamelCase__ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowerCamelCase__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
A_ : List[Any] = '''model.'''
for key in state_dict.copy().keys():
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
A_ : List[Any] = state_dict.pop(lowerCamelCase__ )
A_ : str = val
# create HuggingFace model and load state dict
A_ : Union[str, Any] = TableTransformerConfig(
backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
A_ : Dict = 1_5
A_ : Dict = 2
A_ : int = {0: '''table''', 1: '''table rotated'''}
A_ : List[str] = idalabel
A_ : Optional[int] = {v: k for k, v in idalabel.items()}
else:
A_ : Union[str, Any] = 1_2_5
A_ : Optional[Any] = 6
A_ : Optional[Any] = {
0: '''table''',
1: '''table column''',
2: '''table row''',
3: '''table column header''',
4: '''table projected row header''',
5: '''table spanning cell''',
}
A_ : int = idalabel
A_ : Tuple = {v: k for k, v in idalabel.items()}
A_ : Optional[Any] = DetrImageProcessor(
format='''coco_detection''' , max_size=8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0 )
A_ : int = TableTransformerForObjectDetection(lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
model.eval()
# verify our conversion
A_ : Optional[int] = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
A_ : Union[str, Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=lowerCamelCase__ )
A_ : Tuple = Image.open(lowerCamelCase__ ).convert('''RGB''' )
A_ : int = normalize(resize(lowerCamelCase__ , lowerCamelCase__ ) ).unsqueeze(0 )
A_ : str = model(lowerCamelCase__ )
if "detection" in checkpoint_url:
A_ : str = (1, 1_5, 3)
A_ : int = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
A_ : Tuple = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
A_ : Optional[int] = (1, 1_2_5, 7)
A_ : Dict = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
A_ : Any = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
# Push model to HF hub
logger.info('''Pushing model to the hub...''' )
A_ : List[Any] = (
'''microsoft/table-transformer-detection'''
if '''detection''' in checkpoint_url
else '''microsoft/table-transformer-structure-recognition'''
)
model.push_to_hub(lowerCamelCase__ )
image_processor.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
snake_case__ = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 4 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCamelCase_ (a__ ):
"""simple docstring"""
@staticmethod
@abstractmethod
def _a ( _lowerCamelCase : ArgumentParser ):
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def _a ( self : Union[str, Any] ):
"""simple docstring"""
raise NotImplementedError()
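# Minimal usage sketch of the abstract CLI command above. Both hooks are masked
# to `_a` here; in the upstream transformers CLI they likely correspond to
# `register_subcommand(parser)` and `run()`, so a concrete command (hypothetical
# names) would look like:
#
#     class EnvCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser: ArgumentParser):
#             parser.add_parser("env").set_defaults(func=lambda args: EnvCommand().run())
#
#         def run(self):
#             print("Copy-and-paste the environment info below.")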
| 351 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
snake_case__ = logging.getLogger(__name__)
@dataclass(frozen=a__ )
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
@dataclass(frozen=a__ )
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 42
def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : List[Any]=False , _lowerCamelCase : bool = False , ):
"""simple docstring"""
A_ : Optional[int] = hans_processors[task]()
A_ : int = os.path.join(
_lowerCamelCase , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(_lowerCamelCase ) , _lowerCamelCase , ) , )
A_ : Dict = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A_ ,A_ : List[str] = label_list[2], label_list[1]
A_ : Optional[int] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A_ : str = cached_features_file + '''.lock'''
with FileLock(_lowerCamelCase ):
if os.path.exists(_lowerCamelCase ) and not overwrite_cache:
logger.info(f'Loading features from cached file {cached_features_file}' )
A_ : List[str] = torch.load(_lowerCamelCase )
else:
logger.info(f'Creating features from dataset file at {data_dir}' )
A_ : Optional[int] = (
processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase )
)
logger.info('''Training examples: %s''' , len(_lowerCamelCase ) )
A_ : Optional[int] = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
logger.info('''Saving features into cached file %s''' , _lowerCamelCase )
torch.save(self.features , _lowerCamelCase )
def __len__( self : List[str] ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : List[str] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
return self.features[i]
def _a ( self : str ):
"""simple docstring"""
return self.label_list
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = 128 , _lowerCamelCase : Dict=False , _lowerCamelCase : bool = False , ):
"""simple docstring"""
A_ : Optional[int] = hans_processors[task]()
A_ : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A_ ,A_ : Union[str, Any] = label_list[2], label_list[1]
A_ : Tuple = label_list
A_ : Optional[int] = processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase )
A_ : Tuple = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 10000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(_lowerCamelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
A_ : List[Any] = tf.data.Dataset.from_generator(
_lowerCamelCase , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _a ( self : Any ):
"""simple docstring"""
return self.dataset
def __len__( self : Dict ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : Optional[int] , _lowerCamelCase : List[str] ):
"""simple docstring"""
return self.features[i]
def _a ( self : Tuple ):
"""simple docstring"""
return self.label_list
class UpperCamelCase_ (a__ ):
"""simple docstring"""
def _a ( self : List[str] , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_train_set.txt''' ) ) , '''train''' )
def _a ( self : List[str] , _lowerCamelCase : Tuple ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def _a ( self : Any ):
"""simple docstring"""
return ["contradiction", "entailment", "neutral"]
def _a ( self : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any ):
"""simple docstring"""
A_ : Tuple = []
for i, line in enumerate(_lowerCamelCase ):
if i == 0:
continue
A_ : str = '''%s-%s''' % (set_type, line[0])
A_ : Optional[Any] = line[5]
A_ : Union[str, Any] = line[6]
A_ : List[str] = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
A_ : str = line[0]
examples.append(InputExample(guid=_lowerCamelCase , text_a=_lowerCamelCase , text_b=_lowerCamelCase , label=_lowerCamelCase , pairID=_lowerCamelCase ) )
return examples
def snake_case__ ( lowerCamelCase__ : List[InputExample] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : PreTrainedTokenizer , ) -> int:
A_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase__ )}
A_ : Optional[Any] = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase__ ) , desc='''convert examples to features''' ):
if ex_index % 1_0_0_0_0 == 0:
logger.info('''Writing example %d''' % (ex_index) )
A_ : Optional[int] = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowerCamelCase__ , max_length=lowerCamelCase__ , padding='''max_length''' , truncation=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , )
A_ : List[str] = label_map[example.label] if example.label in label_map else 0
A_ : Tuple = int(example.pairID )
features.append(InputFeatures(**lowerCamelCase__ , label=lowerCamelCase__ , pairID=lowerCamelCase__ ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f'guid: {example}' )
logger.info(f'features: {features[i]}' )
return features
snake_case__ = {
"""hans""": 3,
}
snake_case__ = {
"""hans""": HansProcessor,
}
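# Hypothetical usage sketch (class and keyword names are assumptions, since the
# originals are masked above): building the torch-backed HANS dataset for
# evaluation and reading back its label list.
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     eval_dataset = HansDataset(
#         data_dir="./hans", tokenizer=tokenizer, task="hans", evaluate=True
#     )
#     print(eval_dataset.get_labels())  # ["contradiction", "entailment", "neutral"]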
| 4 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : str=False ) -> List[str]:
A_ : Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'deit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'deit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'deit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'deit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'deit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'deit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'deit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'deit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'deit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'deit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''deit.embeddings.cls_token'''),
('''dist_token''', '''deit.embeddings.distillation_token'''),
('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''deit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
A_ : List[str] = [(pair[0], pair[1][4:]) if pair[1].startswith('''deit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('''norm.weight''', '''deit.layernorm.weight'''),
('''norm.bias''', '''deit.layernorm.bias'''),
('''head.weight''', '''cls_classifier.weight'''),
('''head.bias''', '''cls_classifier.bias'''),
('''head_dist.weight''', '''distillation_classifier.weight'''),
('''head_dist.bias''', '''distillation_classifier.bias'''),
] )
return rename_keys
def snake_case__ ( lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Tuple=False ) -> str:
for i in range(config.num_hidden_layers ):
if base_model:
A_ : Optional[Any] = ''''''
else:
A_ : Union[str, Any] = '''deit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A_ : List[Any] = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
A_ : List[Any] = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
A_ : int = in_proj_weight[
: config.hidden_size, :
]
A_ : List[str] = in_proj_bias[: config.hidden_size]
A_ : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A_ : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A_ : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
A_ : Dict = in_proj_bias[-config.hidden_size :]
def snake_case__ ( lowerCamelCase__ : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] ) -> Dict:
A_ : Optional[Any] = dct.pop(lowerCamelCase__ )
A_ : Optional[Any] = val
def snake_case__ ( ) -> List[str]:
A_ : Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
A_ : Any = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def snake_case__ ( lowerCamelCase__ : Tuple , lowerCamelCase__ : Any ) -> Optional[Any]:
A_ : Any = DeiTConfig()
# all deit models have fine-tuned heads
A_ : Optional[int] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
A_ : List[Any] = 1_0_0_0
A_ : Union[str, Any] = '''huggingface/label-files'''
A_ : Tuple = '''imagenet-1k-id2label.json'''
A_ : Dict = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type='''dataset''' ) , '''r''' ) )
A_ : Tuple = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
A_ : Any = idalabel
A_ : Union[str, Any] = {v: k for k, v in idalabel.items()}
A_ : str = int(deit_name[-6:-4] )
A_ : Any = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('''tiny''' ):
A_ : Optional[int] = 1_9_2
A_ : Dict = 7_6_8
A_ : List[str] = 1_2
A_ : Optional[int] = 3
elif deit_name[9:].startswith('''small''' ):
A_ : Tuple = 3_8_4
A_ : int = 1_5_3_6
A_ : Dict = 1_2
A_ : Any = 6
if deit_name[9:].startswith('''base''' ):
pass
elif deit_name[4:].startswith('''large''' ):
A_ : Tuple = 1_0_2_4
A_ : List[Any] = 4_0_9_6
A_ : Any = 2_4
A_ : Union[str, Any] = 1_6
# load original model from timm
A_ : List[str] = timm.create_model(lowerCamelCase__ , pretrained=lowerCamelCase__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A_ : List[Any] = timm_model.state_dict()
A_ : List[Any] = create_rename_keys(lowerCamelCase__ , lowerCamelCase__ )
for src, dest in rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# load HuggingFace model
A_ : Optional[Any] = DeiTForImageClassificationWithTeacher(lowerCamelCase__ ).eval()
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image, prepared by DeiTImageProcessor
A_ : str = int(
(2_5_6 / 2_2_4) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
A_ : Union[str, Any] = DeiTImageProcessor(size=lowerCamelCase__ , crop_size=config.image_size )
A_ : str = image_processor(images=prepare_img() , return_tensors='''pt''' )
A_ : Tuple = encoding['''pixel_values''']
A_ : Dict = model(lowerCamelCase__ )
A_ : Tuple = timm_model(lowerCamelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCamelCase__ , outputs.logits , atol=1e-3 )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(f'Saving model {deit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase__ )
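# Sizing note for the processor above: for a 224px DeiT model the shorter side is
# resized to int(256 / 224 * 224) = 256 and then center-cropped to 224, matching
# the evaluation transform of the original timm/DeiT code.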
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
snake_case__ = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 352 |
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
snake_case__ = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCamelCase_ (datasets.BuilderConfig ):
"""simple docstring"""
_lowerCAmelCase = None
_lowerCAmelCase = "utf-8"
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = True # deprecated
_lowerCAmelCase = None # deprecated
_lowerCAmelCase = 1_0 << 2_0 # 10MB
_lowerCAmelCase = None
class UpperCamelCase_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
_lowerCAmelCase = JsonConfig
def _a ( self : int ):
"""simple docstring"""
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
A_ : List[Any] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
def _a ( self : Any , _lowerCamelCase : List[str] ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
A_ : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_lowerCamelCase , (str, list, tuple) ):
A_ : Union[str, Any] = data_files
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : List[str] = [files]
A_ : List[Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
A_ : Tuple = []
for split_name, files in data_files.items():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : int = [files]
A_ : Union[str, Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={'''files''': files} ) )
return splits
def _a ( self : int , _lowerCamelCase : pa.Table ):
"""simple docstring"""
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
A_ : Optional[int] = self.config.features.arrow_schema.field(_lowerCamelCase ).type
A_ : Optional[int] = pa_table.append_column(_lowerCamelCase , pa.array([None] * len(_lowerCamelCase ) , type=_lowerCamelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
A_ : str = table_cast(_lowerCamelCase , self.config.features.arrow_schema )
return pa_table
def _a ( self : List[str] , _lowerCamelCase : int ):
"""simple docstring"""
for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A_ : int = json.load(_lowerCamelCase )
# We keep only the field we are interested in
A_ : List[str] = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
if isinstance(_lowerCamelCase , (list, tuple) ):
A_ : int = set().union(*[row.keys() for row in dataset] )
A_ : List[str] = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys}
else:
A_ : Tuple = dataset
A_ : Dict = pa.Table.from_pydict(_lowerCamelCase )
yield file_idx, self._cast_table(_lowerCamelCase )
# If the file has one json object per line
else:
with open(_lowerCamelCase , '''rb''' ) as f:
A_ : int = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
A_ : int = max(self.config.chunksize // 32 , 16 << 10 )
A_ : int = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
A_ : Any = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(_lowerCamelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
A_ : Optional[Any] = batch.decode(self.config.encoding , errors=_lowerCamelCase ).encode('''utf-8''' )
try:
while True:
try:
A_ : List[Any] = paj.read_json(
io.BytesIO(_lowerCamelCase ) , read_options=paj.ReadOptions(block_size=_lowerCamelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(_lowerCamelCase , pa.ArrowInvalid )
and "straddling" not in str(_lowerCamelCase )
or block_size > len(_lowerCamelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f'Batch of {len(_lowerCamelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A_ : Optional[Any] = json.load(_lowerCamelCase )
except json.JSONDecodeError:
logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(_lowerCamelCase , _lowerCamelCase ): # list is the only sequence type supported in JSON
try:
A_ : Optional[int] = set().union(*[row.keys() for row in dataset] )
A_ : Tuple = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys}
A_ : int = pa.Table.from_pydict(_lowerCamelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' )
raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None
yield file_idx, self._cast_table(_lowerCamelCase )
break
else:
logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' )
raise ValueError(
f'Not able to read records in the JSON file at {file}. '
f'You should probably indicate the field of the JSON file containing your records. '
f'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '
f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_lowerCamelCase )
batch_idx += 1
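# The loader above accepts two JSON layouts; a minimal, self-contained sketch of
# how both end up as the same pyarrow Table (helper name is illustrative, the
# logic mirrors the branches above):
def _json_rows_to_table(dataset):
    if isinstance(dataset, (list, tuple)):  # list of dicts, e.g. [{"a": 1}, {"a": 3, "b": 2}]
        keys = set().union(*[row.keys() for row in dataset])
        dataset = {col: [row.get(col) for row in dataset] for col in keys}
    # dict of lists, e.g. {"a": [1, 3], "b": [None, 2]}
    return pa.Table.from_pydict(dataset)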
| 4 | 0 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Dict ):
"""simple docstring"""
A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : Tuple = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : Dict = -1
A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : Any = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase )
A_ : List[str] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
A_ : List[str] = TextStreamer(_lowerCamelCase )
model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
A_ : Dict = cs.out[:-1]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : List[str] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : Dict = -1
A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : Optional[int] = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase )
A_ : str = tokenizer.decode(greedy_ids[0] )
A_ : int = TextIteratorStreamer(_lowerCamelCase )
A_ : List[Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
A_ : List[Any] = Thread(target=model.generate , kwargs=_lowerCamelCase )
thread.start()
A_ : List[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : int ):
"""simple docstring"""
A_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : List[str] = -1
A_ : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : Tuple = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase )
A_ : Tuple = greedy_ids[:, input_ids.shape[1] :]
A_ : Tuple = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
A_ : Any = TextStreamer(_lowerCamelCase , skip_prompt=_lowerCamelCase )
model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
A_ : Any = cs.out[:-1]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
A_ : Tuple = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowerCamelCase )
A_ : List[Any] = -1
A_ : Union[str, Any] = torch.ones((1, 5) , device=_lowerCamelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
A_ : List[Any] = TextStreamer(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
model.generate(_lowerCamelCase , max_new_tokens=1 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
A_ : List[str] = cs.out[:-1] # Remove the final "\n"
A_ : List[Any] = tokenizer(_lowerCamelCase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : Union[str, Any] = -1
A_ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : List[str] = TextIteratorStreamer(_lowerCamelCase , timeout=0.0_01 )
A_ : str = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
A_ : List[str] = Thread(target=model.generate , kwargs=_lowerCamelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_lowerCamelCase ):
A_ : str = ''''''
for new_text in streamer:
streamer_text += new_text
| 353 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class UpperCamelCase_ (a__, a__ ):
"""simple docstring"""
_lowerCAmelCase = 'swin'
_lowerCAmelCase = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Any , _lowerCamelCase : Optional[Any]=224 , _lowerCamelCase : List[str]=4 , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Tuple=96 , _lowerCamelCase : List[Any]=[2, 2, 6, 2] , _lowerCamelCase : List[str]=[3, 6, 12, 24] , _lowerCamelCase : List[Any]=7 , _lowerCamelCase : Optional[int]=4.0 , _lowerCamelCase : List[str]=True , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Any=0.0 , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : Tuple=False , _lowerCamelCase : Dict=0.02 , _lowerCamelCase : Optional[Any]=1E-5 , _lowerCamelCase : Any=32 , _lowerCamelCase : Tuple=None , _lowerCamelCase : Any=None , **_lowerCamelCase : str , ):
"""simple docstring"""
super().__init__(**_lowerCamelCase )
A_ : Optional[int] = image_size
A_ : Optional[int] = patch_size
A_ : Optional[int] = num_channels
A_ : Any = embed_dim
A_ : List[Any] = depths
A_ : Any = len(_lowerCamelCase )
A_ : List[Any] = num_heads
A_ : Tuple = window_size
A_ : Tuple = mlp_ratio
A_ : Dict = qkv_bias
A_ : List[str] = hidden_dropout_prob
A_ : List[str] = attention_probs_dropout_prob
A_ : Any = drop_path_rate
A_ : List[Any] = hidden_act
A_ : Tuple = use_absolute_embeddings
A_ : int = layer_norm_eps
A_ : Optional[Any] = initializer_range
A_ : Union[str, Any] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A_ : str = int(embed_dim * 2 ** (len(_lowerCamelCase ) - 1) )
A_ : str = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(_lowerCamelCase ) + 1 )]
A_ ,A_ : Optional[Any] = get_aligned_output_features_output_indices(
out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names )
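# Worked example for the hidden_size computed above: with the defaults
# embed_dim=96 and depths=[2, 2, 6, 2], hidden_size = 96 * 2 ** (4 - 1) = 768,
# i.e. the channel dimension doubles at each of the three stage transitions.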
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = version.parse('1.11' )
@property
def _a ( self : str ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return 1E-4
| 4 | 0 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def snake_case__ ( lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int]=1_0_2_4 , lowerCamelCase__ : Union[str, Any]=1_0_2_4 , lowerCamelCase__ : Any=False , **lowerCamelCase__ : List[Any] ) -> List[Any]:
A_ : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase__ )
A_ : Optional[Any] = SeqaSeqDataset(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , type_path='''train''' , **lowerCamelCase__ )
A_ : List[str] = tok.pad_token_id
def get_lens(lowerCamelCase__ : str ):
A_ : int = tqdm(
DataLoader(lowerCamelCase__ , batch_size=5_1_2 , num_workers=8 , shuffle=lowerCamelCase__ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
A_ : str = []
for batch in dl:
A_ : str = batch['''input_ids'''].ne(lowerCamelCase__ ).sum(1 ).tolist()
A_ : str = batch['''labels'''].ne(lowerCamelCase__ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowerCamelCase__ , lowerCamelCase__ ):
max_lens.append(max(lowerCamelCase__ , lowerCamelCase__ ) )
else:
max_lens.extend(lowerCamelCase__ )
return max_lens
A_ : Any = get_lens(lowerCamelCase__ )
A_ : Optional[Any] = SeqaSeqDataset(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , type_path='''val''' , **lowerCamelCase__ )
A_ : Any = get_lens(lowerCamelCase__ )
pickle_save(lowerCamelCase__ , train_ds.len_file )
pickle_save(lowerCamelCase__ , val_ds.len_file )
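# The pickled len files written above hold one integer per example: the number of
# non-pad source tokens, or max(source, target) when targets are considered.
# Length-grouped samplers can load them to build efficient batches. Hypothetical
# fire invocation (argument names are assumptions):
#
#     python save_len_file.py --tokenizer_name facebook/bart-large --data_dir ./cnn_dm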
if __name__ == "__main__":
fire.Fire(save_len_file)
| 354 |
'''simple docstring'''
from __future__ import annotations
def snake_case__ ( lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> list[int]:
A_ : int = 0
A_ : str = len(lowerCamelCase__ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
A_ : Tuple = i + 1
else:
A_ : List[str] = j - 1
return []
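# Note: the two-pointer scan above assumes `nums` is sorted in ascending order;
# on unsorted input it can miss valid pairs. Example: two_pointer([2, 7, 11, 15], 18)
# returns [1, 2] because 7 + 11 == 18, while a target of 10 yields [].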
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{two_pointer([2, 7, 11, 15], 9) = }')
| 4 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple=13 , _lowerCamelCase : List[str]=3 , _lowerCamelCase : List[Any]=224 , _lowerCamelCase : Tuple=30 , _lowerCamelCase : Union[str, Any]=400 , _lowerCamelCase : List[str]=True , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Tuple=True , _lowerCamelCase : Any=[0.5, 0.5, 0.5] , _lowerCamelCase : Optional[int]=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
A_ : Optional[int] = size if size is not None else {'''height''': 18, '''width''': 18}
A_ : str = parent
A_ : Any = batch_size
A_ : Union[str, Any] = num_channels
A_ : str = image_size
A_ : Union[str, Any] = min_resolution
A_ : Dict = max_resolution
A_ : Any = do_resize
A_ : str = size
A_ : int = do_normalize
A_ : Optional[int] = image_mean
A_ : List[str] = image_std
def _a ( self : Tuple ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class UpperCamelCase_ (a__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = ViTImageProcessor if is_vision_available() else None
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Tuple = EfficientFormerImageProcessorTester(self )
@property
def _a ( self : Any ):
"""simple docstring"""
return self.image_proc_tester.prepare_image_processor_dict()
def _a ( self : int ):
"""simple docstring"""
A_ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''size''' ) )
def _a ( self : Tuple ):
"""simple docstring"""
pass
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A_ : int = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
A_ : Optional[int] = image_processor(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def _a ( self : Any ):
"""simple docstring"""
A_ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : Tuple = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
A_ : Optional[int] = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
A_ : Dict = image_processor(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : int = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
A_ : int = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
A_ : Optional[Any] = image_processor(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
| 355 |
'''simple docstring'''
def snake_case__ ( lowerCamelCase__ : list[int] , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool:
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(lowerCamelCase__ ) )
def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool:
# Base Case
if index == len(lowerCamelCase__ ):
return True
# Recursive Step
for i in range(lowerCamelCase__ ):
if valid_coloring(graph[index] , lowerCamelCase__ , lowerCamelCase__ ):
# Color current vertex
A_ : int = i
# Validate coloring
if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , index + 1 ):
return True
# Backtrack
A_ : str = -1
return False
def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int ) -> list[int]:
A_ : List[str] = [-1] * len(lowerCamelCase__ )
if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , 0 ):
return colored_vertices
return []
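# Example: for the adjacency matrix of a triangle,
#     graph = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
# the top-level entry point above returns a proper 3-coloring such as [0, 1, 2]
# when given three available colors, and [] when given only two, since a
# triangle is not 2-colorable.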
| 4 | 0 |
'''simple docstring'''
def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : List[Any] ) -> List[Any]:
if height >= 1:
move_tower(height - 1 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
move_disk(lowerCamelCase__ , lowerCamelCase__ )
move_tower(height - 1 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
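# The recursion above is the classic Tower of Hanoi solution: a tower of height n
# is solved in 2 ** n - 1 disk moves, so e.g. height 3 prints exactly 7 moves.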
def snake_case__ ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int ) -> Optional[Any]:
print('''moving disk from''' , lowerCamelCase__ , '''to''' , lowerCamelCase__ )
def snake_case__ ( ) -> Optional[Any]:
A_ : Tuple = int(input('''Height of hanoi: ''' ).strip() )
move_tower(lowerCamelCase__ , '''A''' , '''B''' , '''C''' )
if __name__ == "__main__":
main()
| 356 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
snake_case__ = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
snake_case__ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def snake_case__ ( lowerCamelCase__ : list[list[int]] ) -> list[list[int]]:
A_ : str = []
for i in range(len(lowerCamelCase__ ) ):
A_ : Optional[Any] = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
A_ : Optional[int] = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(lowerCamelCase__ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(lowerCamelCase__ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(lowerCamelCase__ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
A_ : List[str] = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(lowerCamelCase__ )
return next_generation
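# Precedence note for the survival test above: `and` binds tighter than `or`, so
# the condition reads (alive and 2 <= neighbour_count <= 3) or
# ((not alive) and neighbour_count == 3), i.e. exactly rules 1 and 2 of the
# comment block.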
def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int ) -> list[Image.Image]:
A_ : List[Any] = []
for _ in range(lowerCamelCase__ ):
# Create output image
A_ : Optional[int] = Image.new('''RGB''' , (len(cells[0] ), len(lowerCamelCase__ )) )
A_ : int = img.load()
# Save cells to image
for x in range(len(lowerCamelCase__ ) ):
for y in range(len(cells[0] ) ):
A_ : Optional[Any] = 2_5_5 - cells[y][x] * 2_5_5
A_ : str = (colour, colour, colour)
# Save image
images.append(lowerCamelCase__ )
A_ : Optional[int] = new_generation(lowerCamelCase__ )
return images
if __name__ == "__main__":
snake_case__ = generate_images(GLIDER, 16)
images[0].save("""out.gif""", save_all=True, append_images=images[1:])
| 4 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'table-transformer'
_lowerCAmelCase = ['past_key_values']
_lowerCAmelCase = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Any , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Dict=None , _lowerCamelCase : int=3 , _lowerCamelCase : Any=100 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : Any=8 , _lowerCamelCase : Dict=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : int=8 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : List[Any]=0.0 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Optional[int]="relu" , _lowerCamelCase : Union[str, Any]=256 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : str=0.02 , _lowerCamelCase : Tuple=1.0 , _lowerCamelCase : Dict=False , _lowerCamelCase : str="sine" , _lowerCamelCase : str="resnet50" , _lowerCamelCase : Any=True , _lowerCamelCase : List[str]=False , _lowerCamelCase : Any=1 , _lowerCamelCase : int=5 , _lowerCamelCase : Tuple=2 , _lowerCamelCase : Optional[int]=1 , _lowerCamelCase : Any=1 , _lowerCamelCase : Dict=5 , _lowerCamelCase : str=2 , _lowerCamelCase : Union[str, Any]=0.1 , **_lowerCamelCase : int , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
A_ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : str = backbone_config.get('''model_type''' )
A_ : Optional[int] = CONFIG_MAPPING[backbone_model_type]
A_ : List[str] = config_class.from_dict(_lowerCamelCase )
# set timm attributes to None
            A_ ,A_ ,A_ : Union[str, Any] = None, None, None
A_ : Optional[Any] = use_timm_backbone
A_ : Optional[int] = backbone_config
A_ : Optional[Any] = num_channels
A_ : Dict = num_queries
A_ : str = d_model
A_ : List[str] = encoder_ffn_dim
A_ : int = encoder_layers
A_ : Optional[Any] = encoder_attention_heads
A_ : List[str] = decoder_ffn_dim
A_ : Any = decoder_layers
A_ : List[str] = decoder_attention_heads
A_ : Tuple = dropout
A_ : Optional[Any] = attention_dropout
A_ : Any = activation_dropout
A_ : List[Any] = activation_function
A_ : Dict = init_std
A_ : Any = init_xavier_std
A_ : List[Any] = encoder_layerdrop
A_ : int = decoder_layerdrop
A_ : Any = encoder_layers
A_ : List[str] = auxiliary_loss
A_ : List[Any] = position_embedding_type
A_ : Optional[Any] = backbone
A_ : Tuple = use_pretrained_backbone
A_ : List[Any] = dilation
# Hungarian matcher
A_ : List[str] = class_cost
A_ : str = bbox_cost
A_ : Union[str, Any] = giou_cost
# Loss coefficients
A_ : Any = mask_loss_coefficient
A_ : Optional[int] = dice_loss_coefficient
A_ : Dict = bbox_loss_coefficient
A_ : int = giou_loss_coefficient
A_ : int = eos_coefficient
super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase )
@property
def _a ( self : List[Any] ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def _a ( self : Any ):
"""simple docstring"""
return self.d_model
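# Minimal instantiation sketch, mirroring the detection defaults used by the
# conversion script earlier in this document (the class name is masked above, so
# `TableTransformerConfig` is the assumed original):
#
#     config = TableTransformerConfig(backbone="resnet18", num_labels=2)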
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = version.parse('1.11' )
@property
def _a ( self : Tuple ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def _a ( self : Optional[int] ):
"""simple docstring"""
return 1E-5
@property
def _a ( self : str ):
"""simple docstring"""
return 12
| 357 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Any = tempfile.mkdtemp()
A_ : List[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
A_ : Tuple = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
A_ : List[Any] = os.path.join(self.tmpdirname , _lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Dict , **_lowerCamelCase : Tuple ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Optional[int] , **_lowerCamelCase : Optional[int] ):
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Optional[Any] , **_lowerCamelCase : Tuple ):
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self : int ):
"""simple docstring"""
A_ : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ : Any = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a ( self : int ):
"""simple docstring"""
A_ : Tuple = self.get_tokenizer()
A_ : Tuple = self.get_rust_tokenizer()
A_ : Dict = self.get_image_processor()
A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
A_ : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase )
A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
A_ : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[str] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ : Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
A_ : Tuple = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 )
A_ : List[str] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = self.get_image_processor()
A_ : Any = self.get_tokenizer()
A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Any = self.prepare_image_inputs()
A_ : List[Any] = image_processor(_lowerCamelCase , return_tensors='''np''' )
A_ : str = processor(images=_lowerCamelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self : Dict ):
"""simple docstring"""
A_ : str = self.get_image_processor()
A_ : List[str] = self.get_tokenizer()
A_ : Optional[int] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : int = '''lower newer'''
A_ : str = processor(text=_lowerCamelCase )
A_ : Dict = tokenizer(_lowerCamelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self : str ):
"""simple docstring"""
A_ : Optional[int] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : List[Any] = '''lower newer'''
A_ : Optional[int] = self.prepare_image_inputs()
A_ : List[Any] = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Optional[Any] = self.get_image_processor()
A_ : Optional[int] = self.get_tokenizer()
A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : str = processor.batch_decode(_lowerCamelCase )
A_ : Union[str, Any] = tokenizer.batch_decode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
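# Hedged usage sketch (editorial addition; the checkpoint id "kakaobrain/align-base"
# and the `image` variable are assumptions, not part of this test file):
# processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
# inputs = processor(text="a photo of a cat", images=image, return_tensors="pt")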
| 4 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True )
class InputExample :
    """simple docstring"""
    guid : str
    text_a : str
    text_b : Optional[str] = None
    label : Optional[str] = None
    pairID : Optional[str] = None
@dataclass(frozen=True )
class InputFeatures :
    """simple docstring"""
    input_ids : List[int]
    attention_mask : Optional[List[int]] = None
    token_type_ids : Optional[List[int]] = None
    label : Optional[Union[int, float]] = None
    pairID : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 4_2
def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : List[Any]=False , _lowerCamelCase : bool = False , ):
"""simple docstring"""
        processor = hans_processors[task]()
        cached_features_file = os.path.join(
            _lowerCamelCase , '''cached_{}_{}_{}_{}'''.format(
                '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(_lowerCamelCase ) , _lowerCamelCase , ) , )
        label_list = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A_ : str = cached_features_file + '''.lock'''
with FileLock(_lowerCamelCase ):
if os.path.exists(_lowerCamelCase ) and not overwrite_cache:
logger.info(f'Loading features from cached file {cached_features_file}' )
                self.features = torch.load(cached_features_file )
else:
logger.info(f'Creating features from dataset file at {data_dir}' )
                examples = (
                    processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase )
                )
                logger.info('''Training examples: %s''' , len(examples ) )
                self.features = hans_convert_examples_to_features(examples , label_list , _lowerCamelCase , _lowerCamelCase )
                logger.info('''Saving features into cached file %s''' , cached_features_file )
                torch.save(self.features , cached_features_file )
def __len__( self : List[str] ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : List[str] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
return self.features[i]
def _a ( self : str ):
"""simple docstring"""
return self.label_list
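# Hedged usage sketch for the torch Dataset above (editorial addition; the class
# name `HansDataset`, the data path, and `tokenizer` are assumptions):
# dataset = HansDataset("/path/to/hans", tokenizer, "hans")
# loader = torch.utils.data.DataLoader(dataset, batch_size=32)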
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 4_2
def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = 128 , _lowerCamelCase : Dict=False , _lowerCamelCase : bool = False , ):
"""simple docstring"""
        processor = hans_processors[task]()
        label_list = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        examples = processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase )
        self.features = hans_convert_examples_to_features(examples , label_list , _lowerCamelCase , _lowerCamelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 10000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(_lowerCamelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
        self.dataset = tf.data.Dataset.from_generator(
            gen , (
                {
                    '''example_id''': tf.int32,
                    '''input_ids''': tf.int32,
                    '''attention_mask''': tf.int32,
                    '''token_type_ids''': tf.int32,
                },
                tf.int64,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _a ( self : Any ):
"""simple docstring"""
return self.dataset
def __len__( self : Dict ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : Optional[int] , _lowerCamelCase : List[str] ):
"""simple docstring"""
return self.features[i]
def _a ( self : Tuple ):
"""simple docstring"""
return self.label_list
class HansProcessor(DataProcessor ):
"""simple docstring"""
def _a ( self : List[str] , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_train_set.txt''' ) ) , '''train''' )
def _a ( self : List[str] , _lowerCamelCase : Tuple ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def _a ( self : Any ):
"""simple docstring"""
return ["contradiction", "entailment", "neutral"]
def _a ( self : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any ):
"""simple docstring"""
A_ : Tuple = []
for i, line in enumerate(_lowerCamelCase ):
if i == 0:
continue
A_ : str = '''%s-%s''' % (set_type, line[0])
A_ : Optional[Any] = line[5]
A_ : Union[str, Any] = line[6]
A_ : List[str] = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
A_ : str = line[0]
examples.append(InputExample(guid=_lowerCamelCase , text_a=_lowerCamelCase , text_b=_lowerCamelCase , label=_lowerCamelCase , pairID=_lowerCamelCase ) )
return examples
def hans_convert_examples_to_features( examples : List[InputExample] , label_list : List[str] , max_length : int , tokenizer : PreTrainedTokenizer , ) -> List[InputFeatures]:
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc='''convert examples to features''' ):
        if ex_index % 1_0_0_0_0 == 0:
            logger.info('''Writing example %d''' % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding='''max_length''' , truncation=True , return_overflowing_tokens=True , )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f'guid: {example}' )
logger.info(f'features: {features[i]}' )
return features
hans_tasks_num_labels = {
    """hans""": 3,
}
hans_processors = {
    """hans""": HansProcessor,
}
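# Hedged end-to-end sketch (editorial addition; `tokenizer`, the max length of 128,
# and the data path are assumptions):
# processor = hans_processors["hans"]()
# examples = processor.get_dev_examples("/path/to/hans")
# features = hans_convert_examples_to_features(examples, processor.get_labels(), 128, tokenizer)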
| 358 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
snake_case__ = """▁"""
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"""
),
},
"""spm_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"""
)
},
}
MAX_MODEL_INPUT_SIZES = {
"""facebook/s2t-small-librispeech-asr""": 10_24,
}
snake_case__ = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""]
snake_case__ = {"""mustc""": MUSTC_LANGS}
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = MAX_MODEL_INPUT_SIZES
_lowerCAmelCase = ['input_ids', 'attention_mask']
_lowerCAmelCase = []
def __init__( self : Optional[int] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : str="<s>" , _lowerCamelCase : Union[str, Any]="</s>" , _lowerCamelCase : Dict="<pad>" , _lowerCamelCase : str="<unk>" , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : int=False , _lowerCamelCase : Any=None , _lowerCamelCase : Any=None , _lowerCamelCase : Optional[Dict[str, Any]] = None , **_lowerCamelCase : Optional[int] , ):
"""simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , do_upper_case=_lowerCamelCase , do_lower_case=_lowerCamelCase , tgt_lang=_lowerCamelCase , lang_codes=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(_lowerCamelCase )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(_lowerCamelCase , self.sp_model_kwargs )
if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f'<lang:{lang}>' for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs}
A_ : Optional[int] = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
            self.lang_code_to_id = {}
@property
def _a ( self : Tuple ):
"""simple docstring"""
return len(self.encoder )
@property
def _a ( self : int ):
"""simple docstring"""
return self._tgt_lang
@tgt_lang.setter
def _a ( self : List[str] , _lowerCamelCase : Any ):
"""simple docstring"""
        self._tgt_lang = new_tgt_lang
self.set_tgt_lang_special_tokens(_lowerCamelCase )
def _a ( self : Tuple , _lowerCamelCase : str ):
"""simple docstring"""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
def _a ( self : Optional[Any] , _lowerCamelCase : str ):
"""simple docstring"""
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def _a ( self : List[Any] , _lowerCamelCase : int ):
"""simple docstring"""
return self.encoder.get(_lowerCamelCase , self.encoder[self.unk_token] )
def _a ( self : int , _lowerCamelCase : int ):
"""simple docstring"""
return self.decoder.get(_lowerCamelCase , self.unk_token )
def _a ( self : int , _lowerCamelCase : List[str] ):
"""simple docstring"""
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        decoded = self.sp_model.decode(current_sub_tokens )
        out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def _a ( self : int , _lowerCamelCase : Dict , _lowerCamelCase : Any=None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def _a ( self : List[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones
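    # Illustrative shape (editorial note): with one prefix language token and a
    # 3-token sequence, the mask above is [1, 0, 0, 0, 1], i.e. prefix + tokens + eos.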
def _a ( self : Dict ):
"""simple docstring"""
        vocab = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ):
"""simple docstring"""
        state = self.__dict__.copy()
        state['''sp_model'''] = None
return state
def __setstate__( self : List[str] , _lowerCamelCase : Dict ):
"""simple docstring"""
        self.__dict__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
def _a ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
"""simple docstring"""
        save_dir = Path(_lowerCamelCase )
        assert save_dir.is_dir(), f'{save_dir} should be a directory'
        vocab_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
def load_spm( path : str , sp_model_kwargs : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path : str ) -> Union[Dict, List]:
    with open(path , '''r''' ) as f:
        return json.load(f )
def save_json( data : Any , path : str ) -> None:
    with open(path , '''w''' ) as f:
        json.dump(data , f , indent=2 )
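# Hedged round-trip sketch for the JSON helpers above (editorial addition; the
# path is hypothetical):
# save_json({"<s>": 0, "<pad>": 1}, "/tmp/vocab.json")
# assert load_json("/tmp/vocab.json") == {"<s>": 0, "<pad>": 1}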
| 4 | 0 |
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def snake_case__ ( vl : list , wt : list , w : int , n : int ) -> float:
    r = sorted(zip(vl , wt ) , key=lambda x : x[0] / x[1] , reverse=True )
    vl , wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt ) )
    k = bisect(acc , w )
    return (
        0
        if k == 0
        else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k] )
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
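# Worked check of the fractional-knapsack function above (editorial addition; the
# numbers are illustrative):
# snake_case__([60, 100, 120], [10, 20, 30], 50, 3) == 240.0
# ratios are 6, 5, 4, so items 1 and 2 are taken whole (160) plus 20/30 of item 3 (80).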
| 359 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field( default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
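# Editorial note: list_field exists because dataclasses forbid mutable defaults;
# the default_factory lambda hands each instance a fresh copy, e.g.
# bar_int: List[int] = list_field(default=[1, 2, 3]).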
@dataclass
class BasicExample :
    """simple docstring"""
    foo : int
    bar : float
    baz : str
    flag : bool
@dataclass
class WithDefaultExample :
    """simple docstring"""
    foo : int = 4_2
    baz : str = field(default='toto', metadata={'help': 'help message'} )
@dataclass
class WithDefaultBoolExample :
    """simple docstring"""
    foo : bool = False
    baz : bool = True
    opt : Optional[bool] = None
class BasicEnum(Enum ):
    """simple docstring"""
    titi = 'titi'
    toto = 'toto'
class MixedTypeEnum(Enum ):
    """simple docstring"""
    titi = 'titi'
    toto = 'toto'
    fourtytwo = 4_2
@dataclass
class EnumExample :
    """simple docstring"""
    foo : BasicEnum = "toto"
    def __post_init__( self : Optional[Any] ):
        """simple docstring"""
        self.foo = BasicEnum(self.foo )
@dataclass
class MixedTypeEnumExample :
    """simple docstring"""
    foo : MixedTypeEnum = "toto"
    def __post_init__( self : Tuple ):
        """simple docstring"""
        self.foo = MixedTypeEnum(self.foo )
@dataclass
class OptionalExample :
    """simple docstring"""
    foo : Optional[int] = None
    bar : Optional[float] = field(default=None, metadata={'help': 'help message'} )
    baz : Optional[str] = None
    ces : Optional[List[str]] = list_field(default=[] )
    des : Optional[List[int]] = list_field(default=[] )
@dataclass
class ListExample :
    """simple docstring"""
    foo_int : List[int] = list_field(default=[] )
    bar_int : List[int] = list_field(default=[1, 2, 3] )
    foo_str : List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
    foo_float : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class RequiredExample :
    """simple docstring"""
    required_list : List[int] = field()
    required_str : str = field()
    required_enum : BasicEnum = field()
    def __post_init__( self : Tuple ):
        """simple docstring"""
        self.required_enum = BasicEnum(self.required_enum )
@dataclass
class StringLiteralAnnotationExample :
    """simple docstring"""
    foo : "int"
    required_enum : "BasicEnum" = field()
    opt : "Optional[bool]" = None
    baz : "str" = field(default='toto', metadata={'help': 'help message'} )
    foo_str : "List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
    class WithDefaultBoolExamplePep604 :
        """simple docstring"""
        foo : bool = False
        baz : bool = True
        opt : bool | None = None
@dataclass
    class OptionalExamplePep604 :
        """simple docstring"""
        foo : int | None = None
        bar : float | None = field(default=None, metadata={'help': 'help message'} )
        baz : str | None = None
        ces : list[str] | None = list_field(default=[] )
        des : list[int] | None = list_field(default=[] )
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
    def argparsersEqual( self : List[str] , a : argparse.ArgumentParser , b : argparse.ArgumentParser ):
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
            xx = {k: v for k, v in vars(x ).items() if k != '''container'''}
            yy = {k: v for k, v in vars(y ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , _lowerCamelCase ) and yy.get('''choices''' , _lowerCamelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](_lowerCamelCase ) , yy['''type'''](_lowerCamelCase ) )
del xx["type"], yy["type"]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Optional[int] ):
"""simple docstring"""
        parser = HfArgumentParser(BasicExample )
        expected = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--bar''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--baz''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--flag''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
        args = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
        (example ,) = parser.parse_args_into_dataclasses(args , look_for_args_file=False )
self.assertFalse(example.flag )
def _a ( self : Dict ):
"""simple docstring"""
A_ : int = HfArgumentParser(_lowerCamelCase )
        expected = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=_lowerCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Dict ):
"""simple docstring"""
        expected = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' )
expected.add_argument('''--baz''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=_lowerCamelCase , dest='''baz''' )
expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase )
        dataclass_types = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_lowerCamelCase )
for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type )
            self.argparsersEqual(parser , expected )
            args = parser.parse_args([] )
            self.assertEqual(args , Namespace(foo=False , baz=True , opt=None ) )
            args = parser.parse_args(['''--foo''', '''--no_baz'''] )
            self.assertEqual(args , Namespace(foo=True , baz=False , opt=None ) )
            args = parser.parse_args(['''--foo''', '''--baz'''] )
            self.assertEqual(args , Namespace(foo=True , baz=True , opt=None ) )
            args = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
            self.assertEqual(args , Namespace(foo=True , baz=True , opt=True ) )
            args = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
            self.assertEqual(args , Namespace(foo=False , baz=False , opt=False ) )
def _a ( self : List[Any] ):
"""simple docstring"""
        parser = HfArgumentParser(MixedTypeEnumExample )
        expected = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
        args = parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        enum_ex = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
        args = parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        enum_ex = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
        args = parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 42 )
        enum_ex = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def _a ( self : Optional[int] ):
"""simple docstring"""
@dataclass
        class LiteralExample :
            """simple docstring"""
            foo : Literal["titi", "toto", 42] = "toto"
        parser = HfArgumentParser(LiteralExample )
        expected = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
        args = parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        args = parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        args = parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 42 )
def _a ( self : Dict ):
"""simple docstring"""
        parser = HfArgumentParser(ListExample )
        expected = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_lowerCamelCase )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_lowerCamelCase )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
        args = parser.parse_args([] )
        self.assertEqual(
            args , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
        args = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
        self.assertEqual(args , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def _a ( self : Dict ):
"""simple docstring"""
        expected = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=_lowerCamelCase , type=_lowerCamelCase )
expected.add_argument('''--bar''' , default=_lowerCamelCase , type=_lowerCamelCase , help='''help message''' )
expected.add_argument('''--baz''' , default=_lowerCamelCase , type=_lowerCamelCase )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_lowerCamelCase )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_lowerCamelCase )
        dataclass_types = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_lowerCamelCase )
for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type )
            self.argparsersEqual(parser , expected )
            args = parser.parse_args([] )
            self.assertEqual(args , Namespace(foo=None , bar=None , baz=None , ces=[] , des=[] ) )
            args = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
            self.assertEqual(args , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[Any] = HfArgumentParser(_lowerCamelCase )
        expected = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--required_str''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase )
        expected = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , )
expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
        parser = HfArgumentParser(BasicExample )
A_ : Union[str, Any] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
A_ : Optional[int] = parser.parse_dict(_lowerCamelCase )[0]
A_ : str = BasicExample(**_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : List[str] ):
"""simple docstring"""
        parser = HfArgumentParser(BasicExample )
A_ : List[str] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(_lowerCamelCase , parser.parse_dict , _lowerCamelCase , allow_extra_keys=_lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
        parser = HfArgumentParser(BasicExample )
A_ : List[str] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir , '''temp_json''' )
os.mkdir(_lowerCamelCase )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase )
A_ : List[str] = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
A_ : Optional[Any] = BasicExample(**_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : int ):
"""simple docstring"""
        parser = HfArgumentParser(BasicExample )
A_ : Tuple = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir , '''temp_yaml''' )
os.mkdir(_lowerCamelCase )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[Any] = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
A_ : int = BasicExample(**_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
        parser = HfArgumentParser(TrainingArguments )
        self.assertIsNotNone(parser )
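# Hedged CLI sketch of the pattern exercised above (editorial addition; the argv
# values are illustrative):
# parser = HfArgumentParser(BasicExample)
# (example,) = parser.parse_args_into_dataclasses(
#     ["--foo", "12", "--bar", "3.14", "--baz", "42", "--flag", "True"])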
| 4 | 0 |