"""Segment tree with a generic combiner function (min, max, sum, ...)."""
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """Segment Tree constructor; works with any commutative combiner."""
        any_type: Any | T = None

        self.N: int = len(arr)
        # Leaves live in st[N .. 2N); internal nodes in st[1 .. N).
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        # Internal node p combines its two children, 2p and 2p + 1.
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Update element p in O(log N) time."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """Get the range query value for [l, r] in O(log N) time."""
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Compare every possible segment against a brute-force reduce."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
"""YOLOS model configuration."""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
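
# Minimal usage sketch (an illustrative addition, not part of the original
# module; it assumes the `transformers` package context for the relative
# imports above):
#
#   config = YolosConfig()                 # defaults: hidden_size=768, 12 layers
#   onnx_config = YolosOnnxConfig(config)  # ONNX export spec for this model type
#   print(config.model_type)               # "yolos"
#   print(dict(onnx_config.inputs))        # {"pixel_values": {0: "batch", ...}}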
"""Shared test helpers for the DeepFloyd IF pipelines."""
import tempfile

import numpy as np
import torch

from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device

from ..test_pipelines_common import to_np


class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)

        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)

        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
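
# Usage sketch (an illustrative addition): concrete DeepFloyd IF pipeline tests
# combine this mixin with a pipeline test case and delegate to the helpers, e.g.
#
#   class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
#       pipeline_class = IFPipeline
#
#       def test_save_load_local(self):
#           self._test_save_load_local()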
import unittest

from knapsack import greedy_knapsack as kp


class TestClass(unittest.TestCase):
    """Test cases for the greedy knapsack implementation."""

    def test_sorted(self):
        """
        kp.calc_profit takes the required arguments (profit, weight, max_weight)
        and returns the maximum achievable profit.
        """
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        """Returns ValueError for any negative max_weight value."""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        """Returns ValueError for any negative weight value in the list."""
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        """Returns ValueError for any negative profit value in the list."""
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        """Returns ValueError for a zero max_weight value."""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        """Returns IndexError if the profit and weight lists differ in length."""
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same."
        )


if __name__ == "__main__":
    unittest.main()
"""One-time-pad-style cipher using pseudo-random keys."""
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt `text`; return the cipher values and the random key."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Recover the plaintext from the cipher values and the key."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
"""Sieve of Eratosthenes: list all primes up to a given number."""
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """Return a list of all prime numbers up to and including num."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
"""Shared save/load tests for feature extractor classes."""
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
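
# Usage sketch (an illustrative addition): concrete feature extractor tests mix
# this class into a unittest.TestCase and provide the two expected attributes;
# the attribute values below are assumed examples, e.g.
#
#   class MyFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#       feature_extraction_class = MyFeatureExtractor
#       feat_extract_dict = {"feature_size": 1, "sampling_rate": 16000}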
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
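
# Usage sketch (an illustrative addition; assumes a running local SparkSession):
#
#   from pyspark.sql import SparkSession
#   spark = SparkSession.builder.master("local[*]").getOrCreate()
#   df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#   ds = SparkDatasetReader(df, streaming=False).read()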
"""AutoImageProcessor class."""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union

# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
    CONFIG_MAPPING_NAMES,
    AutoConfig,
    model_type_to_module_name,
    replace_list_option_in_docstrings,
)


logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
    [
        ("align", "EfficientNetImageProcessor"),
        ("beit", "BeitImageProcessor"),
        ("bit", "BitImageProcessor"),
        ("blip", "BlipImageProcessor"),
        ("blip-2", "BlipImageProcessor"),
        ("bridgetower", "BridgeTowerImageProcessor"),
        ("chinese_clip", "ChineseCLIPImageProcessor"),
        ("clip", "CLIPImageProcessor"),
        ("clipseg", "ViTImageProcessor"),
        ("conditional_detr", "ConditionalDetrImageProcessor"),
        ("convnext", "ConvNextImageProcessor"),
        ("convnextv2", "ConvNextImageProcessor"),
        ("cvt", "ConvNextImageProcessor"),
        ("data2vec-vision", "BeitImageProcessor"),
        ("deformable_detr", "DeformableDetrImageProcessor"),
        ("deit", "DeiTImageProcessor"),
        ("deta", "DetaImageProcessor"),
        ("detr", "DetrImageProcessor"),
        ("dinat", "ViTImageProcessor"),
        ("donut-swin", "DonutImageProcessor"),
        ("dpt", "DPTImageProcessor"),
        ("efficientformer", "EfficientFormerImageProcessor"),
        ("efficientnet", "EfficientNetImageProcessor"),
        ("flava", "FlavaImageProcessor"),
        ("focalnet", "BitImageProcessor"),
        ("git", "CLIPImageProcessor"),
        ("glpn", "GLPNImageProcessor"),
        ("groupvit", "CLIPImageProcessor"),
        ("imagegpt", "ImageGPTImageProcessor"),
        ("instructblip", "BlipImageProcessor"),
        ("layoutlmv2", "LayoutLMv2ImageProcessor"),
        ("layoutlmv3", "LayoutLMv3ImageProcessor"),
        ("levit", "LevitImageProcessor"),
        ("mask2former", "Mask2FormerImageProcessor"),
        ("maskformer", "MaskFormerImageProcessor"),
        ("mgp-str", "ViTImageProcessor"),
        ("mobilenet_v1", "MobileNetV1ImageProcessor"),
        ("mobilenet_v2", "MobileNetV2ImageProcessor"),
        ("mobilevit", "MobileViTImageProcessor"),
        ("mobilevitv2", "MobileViTImageProcessor"),
        ("nat", "ViTImageProcessor"),
        ("oneformer", "OneFormerImageProcessor"),
        ("owlvit", "OwlViTImageProcessor"),
        ("perceiver", "PerceiverImageProcessor"),
        ("pix2struct", "Pix2StructImageProcessor"),
        ("poolformer", "PoolFormerImageProcessor"),
        ("regnet", "ConvNextImageProcessor"),
        ("resnet", "ConvNextImageProcessor"),
        ("sam", "SamImageProcessor"),
        ("segformer", "SegformerImageProcessor"),
        ("swiftformer", "ViTImageProcessor"),
        ("swin", "ViTImageProcessor"),
        ("swin2sr", "Swin2SRImageProcessor"),
        ("swinv2", "ViTImageProcessor"),
        ("table-transformer", "DetrImageProcessor"),
        ("timesformer", "VideoMAEImageProcessor"),
        ("tvlt", "TvltImageProcessor"),
        ("upernet", "SegformerImageProcessor"),
        ("van", "ConvNextImageProcessor"),
        ("videomae", "VideoMAEImageProcessor"),
        ("vilt", "ViltImageProcessor"),
        ("vit", "ViTImageProcessor"),
        ("vit_hybrid", "ViTHybridImageProcessor"),
        ("vit_mae", "ViTImageProcessor"),
        ("vit_msn", "ViTImageProcessor"),
        ("xclip", "CLIPImageProcessor"),
        ("yolos", "YolosImageProcessor"),
    ]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)


def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Load the image processor configuration from a pretrained model as a `Dict`."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    """
    A generic image processor class that is instantiated as one of the concrete image processors
    of the library when created with the [`AutoImageProcessor.from_pretrained`] class method.
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor
        # config and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new image processor class for a given config class."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
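
# Usage sketch (an illustrative addition; downloads a checkpoint from the Hub,
# and `pil_image` is an assumed PIL.Image input):
#
#   from transformers import AutoImageProcessor
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   inputs = image_processor(images=pil_image, return_tensors="pt")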
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
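
# Minimal usage sketch (an illustrative addition, not part of this module):
# the typical training-loop wiring with the `Accelerator` re-exported above.
# `model`, `optimizer`, and `dataloader` are assumed to be defined by the user.
#
#   accelerator = Accelerator()
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#   for batch in dataloader:
#       loss = model(**batch).loss
#       accelerator.backward(loss)
#       optimizer.step()
#       optimizer.zero_grad()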
"""Polynomials in a single indeterminate, with coefficients stored lowest degree first."""
from __future__ import annotations

from collections.abc import MutableSequence


class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False

        if self.degree != polynomial_2.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
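
if __name__ == "__main__":
    # Illustrative demo (an addition, not part of the original module):
    # p(x) = 3x^2 + 2x + 1, with coefficients given lowest degree first.
    p = Polynomial(2, [1, 2, 3])
    print(p)               # 3x^2 + 2x + 1
    print(p.evaluate(2))   # 17
    print(p.derivative())  # 6x + 2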
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
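
# Example invocation (an illustrative addition): once `diffusers` is installed,
# the console script entry point prints environment info for bug reports:
#
#   diffusers-cli env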
"""Image processor class for Pix2Struct."""
import io
import math
from typing import Dict, Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    get_image_size,
    infer_channel_dimension_format,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends


if is_vision_available():
    import textwrap

    from PIL import Image, ImageDraw, ImageFont

if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False


logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = "ybelkada/fonts"


def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )


def torch_extract_patches(image_tensor, patch_height, patch_width):
    """
    Extract patches from `image_tensor`. Returns a tensor of shape
    (1, rows, columns, num_channels * patch_height * patch_width).
    """
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
) -> "Image.Image":
    """Render text on a white background and return it as a PIL image."""
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image


def render_header(image: np.ndarray, header: str, **kwargs):
    """Render the header text as an image and concatenate it above the input image."""
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    r"""
    Constructs a Pix2Struct image processor that turns images into flattened patches.
    """

    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        """Extract flattened patches from an image, each prefixed with its row and column id."""
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. the resized image fits within max_patches patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result

    def normalize(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        """
        Normalize an image with its own mean and standard deviation, as in the original
        Pix2Struct implementation.
        """
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        """Preprocess an image or batch of images into flattened patches plus an attention mask."""
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")

            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
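
# Usage sketch (an illustrative addition; `pil_image` is an assumed PIL.Image):
#
#   processor = Pix2StructImageProcessor(max_patches=1024)
#   encoding = processor(images=pil_image, return_tensors="pt")
#   # encoding["flattened_patches"]: (1, 1024, 2 + 16 * 16 * 3) for 16x16 RGB patches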
"""
Pure Python implementation of the jump search algorithm. It iterates through a
sorted collection with a step of sqrt(n) until the element compared is bigger
than the one searched, then performs a linear search back from the previous
step. Returns the index of the match, or -1 if not found.
"""
import math


def jump_search(arr: list, x: int) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))

    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
"""Slowsort: a deliberately inefficient, in-place recursive sorting algorithm."""
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort sequence[start..end] (inclusive) in place."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
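
# Illustrative demo (an addition): slowsort sorts the list in place.
if __name__ == "__main__":
    data = [5, 1, 4, 2, 3]
    slowsort(data)
    print(data)  # [1, 2, 3, 4, 5]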
"""Tabu search applied to the travelling salesman problem."""
import argparse
import copy


def generate_neighbours(path):
    """
    Build a dictionary mapping each node to its neighbors and the cost of each
    edge, from a file that lists one weighted edge per line.
    """
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours
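
# Note (an illustrative addition): the input file is expected to contain one
# weighted edge per line, "<node> <node> <distance>", with the start node being
# the first character of the file, e.g.:
#
#   a b 20
#   a c 18
#   b c 10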
def generate_first_solution(path, dict_of_neighbours):
    """Greedily build an initial tour starting from the first node in the file."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    """Enumerate all 2-swap neighbors of a solution, each appended with its total cost."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0

            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Run tabu search for `iters` iterations with a tabu list of at most `size` moves."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
"""Tokenization classes for CANINE."""
from typing import Dict, List, Optional

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class CanineTokenizer(PreTrainedTokenizer):
    r"""
    Construct a CANINE tokenizer (i.e. a character splitter). It turns text into a
    sequence of characters and then converts each character into its Unicode codepoint.
    """

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string (i.e. split it into its Unicode characters)."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (i.e. a Unicode character) to an id (i.e. its codepoint)."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """
        Convert a Unicode codepoint (integer) to a token (str). Special codepoints
        are converted to their human-readable names.
        """
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs with special tokens: [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Retrieve a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create token type IDs: 0s for the first segment, 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CanineTokenizer has no vocab file, so there is nothing to save.
        return ()
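
# Usage sketch (an illustrative addition): CANINE tokenizes at the Unicode
# codepoint level, so no vocabulary file is needed.
#
#   tokenizer = CanineTokenizer(model_max_length=2048)
#   enc = tokenizer("hi")
#   # enc["input_ids"] == [57344, 104, 105, 57345]  # [CLS], 'h', 'i', [SEP]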
"""0-1 BFS: shortest path in a graph whose edge weights are all 0 or 1."""
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    """Weighted directed graph edge."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")

        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")

        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """
        Return the shortest distance from start_vertex to finish_vertex in O(V + E)
        using a deque: 0-weight edges extend the current frontier (appendleft),
        while 1-weight edges go to the back of the queue (append).
        """
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
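
# Illustrative demo (an addition, not in the original file): 0-1 BFS on a tiny graph.
if __name__ == "__main__":
    g = AdjacencyList(3)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 1)
    print(g.get_shortest_path(0, 2))  # 1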
"""N-order IIR (infinite impulse response) digital filter in direct form I."""
from __future__ import annotations


class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        """Set the a and b coefficients; a_0 of 1.0 may be omitted from a_coeffs."""
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(msg)

        if len(b_coeffs) != self.order + 1:
            msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        """Calculate y[n] for the input sample x[n]."""
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        # Shift the histories by one sample.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
| 366 |
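The row above is a direct-form-I IIR filter. A free-function sketch of one update step, checked against an assumed first-order low-pass y[n] = 0.5*x[n] + 0.5*x[n-1] (these coefficients are illustrative, not taken from the row):

def iir_step(sample, a, b, x_hist, y_hist):
    # accumulate feedforward (b) and feedback (a) taps, index 0 handled last
    acc = sum(b[i] * x_hist[i - 1] for i in range(1, len(b)))
    acc -= sum(a[i] * y_hist[i - 1] for i in range(1, len(a)))
    out = (acc + b[0] * sample) / a[0]
    x_hist.insert(0, sample); x_hist.pop()  # shift input history
    y_hist.insert(0, out); y_hist.pop()     # shift output history
    return out

a, b = [1.0, 0.0], [0.5, 0.5]  # assumed first-order moving-average low-pass
x_hist, y_hist = [0.0], [0.0]
print([round(iir_step(s, a, b, x_hist, y_hist), 3) for s in (1.0, 1.0, 1.0)])
# -> [0.5, 1.0, 1.0]: the step input settles after one sample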
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""")
    datasets = DatasetDict(
        {
            """train""": dataset["""train"""].select(train_idxs),
            """validation""": dataset["""train"""].select(valid_idxs),
            """test""": dataset["""validation"""],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["""idx""", """sentence1""", """sentence2"""],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""", """labels""")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="""longest""",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="""pt""",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["""test"""], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    '''simple docstring'''
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("""glue""", """mrpc""")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""])
    seed = int(config["""seed"""])
    batch_size = int(config["""batch_size"""])
    metric = evaluate.load("""glue""", """mrpc""")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["""train"""].num_rows), datasets["""train"""]["""label"""])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'''epoch {epoch}:''', eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("""Average test metrics from all folds:""", test_metric)
def main() -> Any:
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="""Simple example of training script.""")
    parser.add_argument(
        """--mixed_precision""",
        type=str,
        default=None,
        choices=["""no""", """fp16""", """bf16""", """fp8"""],
        help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""",
    )
    parser.add_argument("""--cpu""", action="""store_true""", help="""If passed, will train on the CPU.""")
    # New Code #
    parser.add_argument("""--num_folds""", type=int, default=3, help="""The number of splits to perform across the dataset""")
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 349 | 0 |
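The fold-ensembling at the end of the row above (stack per-fold logits, average, argmax) can be exercised without any training loop. A toy sketch on synthetic data, where each "model" is just the fold's class prior; everything below is illustrative:

import numpy as np
from sklearn.model_selection import StratifiedKFold

X = np.zeros((100, 4))          # dummy features
y = np.array([0, 1] * 50)       # balanced labels so every fold sees both classes

fold_logits = []
for train_idx, _ in StratifiedKFold(n_splits=3).split(X, y):
    # a trivial "model": log class priors of the fold's training split
    prior = np.bincount(y[train_idx], minlength=2) / len(train_idx)
    fold_logits.append(np.tile(np.log(prior), (10, 1)))  # 10 shared "test" rows

avg_logits = np.stack(fold_logits).mean(axis=0)  # average over folds
preds = avg_logits.argmax(axis=-1)               # final ensembled labels
print(preds.shape)  # (10,)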
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
A_ : Tuple = logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase__ )
class lowercase ( UpperCamelCase__ ):
"""simple docstring"""
def __init__( self ,**a_ ) -> Optional[int]:
super().__init__(**__lowerCamelCase )
requires_backends(self ,"""vision""" )
requires_backends(self ,"""torch""" )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
self.check_model_type(__lowerCamelCase )
def _snake_case ( self ,**a_ ) -> Union[str, Any]:
_UpperCAmelCase : List[Any] = {}
_UpperCAmelCase : List[str] = {}
_UpperCAmelCase : Tuple = {}
# preprocess args
if "points_per_batch" in kwargs:
_UpperCAmelCase : Tuple = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
_UpperCAmelCase : Dict = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
_UpperCAmelCase : Optional[int] = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
_UpperCAmelCase : Any = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
_UpperCAmelCase : Dict = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
_UpperCAmelCase : Optional[int] = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
_UpperCAmelCase : Union[str, Any] = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
_UpperCAmelCase : Optional[Any] = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
_UpperCAmelCase : int = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
_UpperCAmelCase : List[Any] = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
_UpperCAmelCase : Tuple = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
_UpperCAmelCase : Any = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self ,a_ ,*a_ ,a_=None ,a_=None ,**a_ ) -> Optional[int]:
return super().__call__(__lowerCamelCase ,*__lowerCamelCase ,num_workers=__lowerCamelCase ,batch_size=__lowerCamelCase ,**__lowerCamelCase )
def _snake_case ( self ,a_ ,a_=64 ,a_ = 0 ,a_ = 512 / 1_500 ,a_ = 32 ,a_ = 1 ,) -> Dict:
_UpperCAmelCase : str = load_image(__lowerCamelCase )
_UpperCAmelCase : Any = self.image_processor.size["""longest_edge"""]
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Union[str, Any] = self.image_processor.generate_crop_boxes(
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
_UpperCAmelCase : Any = self.image_processor(images=__lowerCamelCase ,return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
_UpperCAmelCase : List[Any] = self.get_inference_context()
with inference_context():
_UpperCAmelCase : List[Any] = self._ensure_tensor_on_device(__lowerCamelCase ,device=self.device )
_UpperCAmelCase : Optional[int] = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
_UpperCAmelCase : List[Any] = image_embeddings
_UpperCAmelCase : Union[str, Any] = grid_points.shape[1]
_UpperCAmelCase : List[Any] = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 ,__lowerCamelCase ,__lowerCamelCase ):
_UpperCAmelCase : Union[str, Any] = grid_points[:, i : i + points_per_batch, :, :]
_UpperCAmelCase : Union[str, Any] = input_labels[:, i : i + points_per_batch]
_UpperCAmelCase : int = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _snake_case ( self ,a_ ,a_=0.88 ,a_=0.95 ,a_=0 ,a_=1 ,) -> Dict:
_UpperCAmelCase : Optional[int] = model_inputs.pop("""input_boxes""" )
_UpperCAmelCase : Optional[Any] = model_inputs.pop("""is_last""" )
_UpperCAmelCase : List[Any] = model_inputs.pop("""original_sizes""" ).tolist()
_UpperCAmelCase : Optional[Any] = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
_UpperCAmelCase : Optional[Any] = self.model(**__lowerCamelCase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
_UpperCAmelCase : Optional[Any] = model_outputs["""pred_masks"""]
_UpperCAmelCase : List[Any] = self.image_processor.post_process_masks(
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,binarize=__lowerCamelCase )
_UpperCAmelCase : Union[str, Any] = model_outputs["""iou_scores"""]
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : str = self.image_processor.filter_masks(
masks[0] ,iou_scores[0] ,original_sizes[0] ,input_boxes[0] ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,)
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def _snake_case ( self ,a_ ,a_=False ,a_=False ,a_=0.7 ,) -> List[str]:
_UpperCAmelCase : str = []
_UpperCAmelCase : Optional[Any] = []
_UpperCAmelCase : Any = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
_UpperCAmelCase : List[str] = torch.cat(__lowerCamelCase )
_UpperCAmelCase : int = torch.cat(__lowerCamelCase )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : str = self.image_processor.post_process_for_mask_generation(
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
_UpperCAmelCase : Optional[int] = defaultdict(__lowerCamelCase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(__lowerCamelCase )
_UpperCAmelCase : List[Any] = {}
if output_rle_mask:
_UpperCAmelCase : Tuple = rle_mask
if output_bboxes_mask:
_UpperCAmelCase : List[str] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 367 |
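The row above wires SAM-style mask generation into a chunked pipeline: grid points are batched through forward, then all candidate masks are filtered and NMS-merged in postprocessing. A hedged usage sketch; the checkpoint name and image path are assumptions, not values from the row:

from transformers import pipeline

# "facebook/sam-vit-base" is a commonly used SAM checkpoint; swap in whatever
# mask-generation model you actually have available.
generator = pipeline("mask-generation", model="facebook/sam-vit-base")
outputs = generator("example_image.png", points_per_batch=64)
print(len(outputs["masks"]), outputs["scores"].shape)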
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
A_ : Dict = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    """simple docstring"""

    mode = """sequence-classification"""

    def __init__(self, hparams) -> None:
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)
def _snake_case ( self ,**a_ ) -> Optional[Any]:
return self.model(**a_ )
def _snake_case ( self ,a_ ,a_ ) -> Optional[Any]:
_UpperCAmelCase : Optional[Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_UpperCAmelCase : Any = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
_UpperCAmelCase : Any = self(**a_ )
_UpperCAmelCase : int = outputs[0]
_UpperCAmelCase : Any = self.trainer.lr_schedulers[0]["""scheduler"""]
_UpperCAmelCase : Any = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _snake_case ( self ) -> int:
_UpperCAmelCase : Optional[int] = self.hparams
_UpperCAmelCase : int = processors[args.task]()
_UpperCAmelCase : str = processor.get_labels()
for mode in ["train", "dev"]:
_UpperCAmelCase : Tuple = self._feature_file(a_ )
if os.path.exists(a_ ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" ,a_ )
else:
logger.info("""Creating features from dataset file at %s""" ,args.data_dir )
_UpperCAmelCase : List[Any] = (
processor.get_dev_examples(args.data_dir )
if mode == """dev"""
else processor.get_train_examples(args.data_dir )
)
_UpperCAmelCase : Union[str, Any] = convert_examples_to_features(
a_ ,self.tokenizer ,max_length=args.max_seq_length ,label_list=self.labels ,output_mode=args.glue_output_mode ,)
logger.info("""Saving features into cached file %s""" ,a_ )
torch.save(a_ ,a_ )
def _snake_case ( self ,a_ ,a_ ,a_ = False ) -> DataLoader:
_UpperCAmelCase : Union[str, Any] = """dev""" if mode == """test""" else mode
_UpperCAmelCase : Tuple = self._feature_file(a_ )
logger.info("""Loading features from cached file %s""" ,a_ )
_UpperCAmelCase : Union[str, Any] = torch.load(a_ )
_UpperCAmelCase : List[str] = torch.tensor([f.input_ids for f in features] ,dtype=torch.long )
_UpperCAmelCase : Tuple = torch.tensor([f.attention_mask for f in features] ,dtype=torch.long )
_UpperCAmelCase : str = torch.tensor([f.token_type_ids for f in features] ,dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_UpperCAmelCase : Optional[int] = torch.tensor([f.label for f in features] ,dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_UpperCAmelCase : str = torch.tensor([f.label for f in features] ,dtype=torch.float )
return DataLoader(
TensorDataset(a_ ,a_ ,a_ ,a_ ) ,batch_size=a_ ,shuffle=a_ ,)
def _snake_case ( self ,a_ ,a_ ) -> Any:
_UpperCAmelCase : Any = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_UpperCAmelCase : int = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
_UpperCAmelCase : List[str] = self(**a_ )
_UpperCAmelCase ,_UpperCAmelCase : Optional[int] = outputs[:2]
_UpperCAmelCase : List[str] = logits.detach().cpu().numpy()
_UpperCAmelCase : Union[str, Any] = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _snake_case ( self ,a_ ) -> tuple:
_UpperCAmelCase : Optional[int] = torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item()
_UpperCAmelCase : Any = np.concatenate([x["""pred"""] for x in outputs] ,axis=0 )
if self.hparams.glue_output_mode == "classification":
_UpperCAmelCase : int = np.argmax(a_ ,axis=1 )
elif self.hparams.glue_output_mode == "regression":
_UpperCAmelCase : Union[str, Any] = np.squeeze(a_ )
_UpperCAmelCase : str = np.concatenate([x["""target"""] for x in outputs] ,axis=0 )
_UpperCAmelCase : Tuple = [[] for _ in range(out_label_ids.shape[0] )]
_UpperCAmelCase : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
_UpperCAmelCase : Optional[int] = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task ,a_ ,a_ )}
_UpperCAmelCase : Dict = dict(results.items() )
_UpperCAmelCase : Any = results
return ret, preds_list, out_label_list
def _snake_case ( self ,a_ ) -> dict:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Dict = self._eval_end(a_ )
_UpperCAmelCase : List[Any] = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _snake_case ( self ,a_ ) -> dict:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : str = self._eval_end(a_ )
_UpperCAmelCase : List[Any] = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir) -> Any:
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            """--max_seq_length""",
            default=128,
            type=int,
            help=(
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            ),
        )
        parser.add_argument(
            """--task""",
            default="""""",
            type=str,
            required=True,
            help="""The GLUE task to run""",
        )
        parser.add_argument(
            """--gpus""",
            default=0,
            type=int,
            help="""The number of GPUs allocated for this, it is by default 0 meaning none""",
        )
        parser.add_argument(
            """--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets"""
        )
        return parser
def main() -> Tuple:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            """./results""",
            f'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''',
        )
        os.makedirs(args.output_dir)
    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
| 349 | 0 |
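The `_eval_end` step in the row above reduces to: argmax the logits for classification tasks, then score predictions against labels. The same reduction in isolation, with numpy only (toy logits and labels for illustration):

import numpy as np

logits = np.array([[0.1, 0.9], [2.0, -1.0], [0.3, 0.4]])  # toy model outputs
labels = np.array([1, 0, 0])

preds = np.argmax(logits, axis=1)           # classification branch of _eval_end
accuracy = float((preds == labels).mean())  # what compute_metrics boils down to
print(preds.tolist(), accuracy)             # [1, 0, 1] 0.666...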
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Dict = logging.get_logger(__name__)
A_ : Optional[int] = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class lowercase ( A_ ):
"""simple docstring"""
UpperCAmelCase = """dpr"""
    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="""gelu""",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="""absolute""",
        projection_dim=0,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 368 |
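The config in the row above can be instantiated directly; a minimal sketch (no weights are downloaded, and the printed defaults follow the signature above):

from transformers import DPRConfig

config = DPRConfig(projection_dim=128)  # 128 is an arbitrary illustrative value
print(config.model_type, config.hidden_size, config.projection_dim)  # dpr 768 128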
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : Union[str, Any] = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = """roformer"""
    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="""gelu""",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCAmelCase : Optional[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_UpperCAmelCase : List[Any] = {0: """batch""", 1: """sequence"""}
_UpperCAmelCase : Tuple = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 349 | 0 |
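For the ONNX config at the end of the row above, the inputs property just maps each tensor name to its dynamic axes. Reconstructed by hand for the default (non-multiple-choice) task:

from collections import OrderedDict

dynamic_axis = {0: "batch", 1: "sequence"}
onnx_inputs = OrderedDict(
    [
        ("input_ids", dynamic_axis),
        ("attention_mask", dynamic_axis),
        ("token_type_ids", dynamic_axis),
    ]
)
print(list(onnx_inputs))  # ['input_ids', 'attention_mask', 'token_type_ids']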
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class lowercase :
"""simple docstring"""
def __init__( self ,a_ ,a_=13 ,a_=7 ,a_=True ,a_=True ,a_=True ,a_=True ,a_=99 ,a_=32 ,a_=2 ,a_=4 ,a_=37 ,a_="gelu" ,a_=0.1 ,a_=0.1 ,a_=512 ,a_=16 ,a_=2 ,a_=0.02 ,a_=3 ,a_=4 ,a_=None ,a_=0 ,) -> Any:
_UpperCAmelCase : Optional[Any] = parent
_UpperCAmelCase : Dict = batch_size
_UpperCAmelCase : Optional[int] = seq_length
_UpperCAmelCase : Any = is_training
_UpperCAmelCase : List[Any] = use_input_mask
_UpperCAmelCase : List[str] = use_token_type_ids
_UpperCAmelCase : List[str] = use_labels
_UpperCAmelCase : Dict = vocab_size
_UpperCAmelCase : Any = hidden_size
_UpperCAmelCase : Tuple = num_hidden_layers
_UpperCAmelCase : int = num_attention_heads
_UpperCAmelCase : str = intermediate_size
_UpperCAmelCase : Dict = hidden_act
_UpperCAmelCase : List[Any] = hidden_dropout_prob
_UpperCAmelCase : Any = attention_probs_dropout_prob
_UpperCAmelCase : Optional[Any] = max_position_embeddings
_UpperCAmelCase : Optional[Any] = type_vocab_size
_UpperCAmelCase : Any = type_sequence_label_size
_UpperCAmelCase : Union[str, Any] = initializer_range
_UpperCAmelCase : str = num_labels
_UpperCAmelCase : int = num_choices
_UpperCAmelCase : List[Any] = scope
_UpperCAmelCase : Dict = projection_dim
def _snake_case ( self ) -> int:
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_UpperCAmelCase : List[Any] = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
_UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : List[str] = None
if self.use_token_type_ids:
_UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
_UpperCAmelCase : Optional[Any] = None
_UpperCAmelCase : Optional[Any] = None
_UpperCAmelCase : List[str] = None
if self.use_labels:
_UpperCAmelCase : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_UpperCAmelCase : List[str] = ids_tensor([self.batch_size] ,self.num_choices )
_UpperCAmelCase : List[str] = BertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,)
_UpperCAmelCase : Union[str, Any] = DPRConfig(projection_dim=self.projection_dim ,**config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ) -> Dict:
_UpperCAmelCase : List[Any] = TFDPRContextEncoder(config=_a )
_UpperCAmelCase : List[str] = model(_a ,attention_mask=_a ,token_type_ids=_a )
_UpperCAmelCase : List[str] = model(_a ,token_type_ids=_a )
_UpperCAmelCase : Dict = model(_a )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.projection_dim or self.hidden_size) )
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ) -> List[Any]:
_UpperCAmelCase : Any = TFDPRQuestionEncoder(config=_a )
_UpperCAmelCase : str = model(_a ,attention_mask=_a ,token_type_ids=_a )
_UpperCAmelCase : int = model(_a ,token_type_ids=_a )
_UpperCAmelCase : Optional[Any] = model(_a )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.projection_dim or self.hidden_size) )
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ) -> List[str]:
_UpperCAmelCase : Optional[Any] = TFDPRReader(config=_a )
_UpperCAmelCase : Dict = model(_a ,attention_mask=_a )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape ,(self.batch_size,) )
def _snake_case ( self ) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class lowercase ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
UpperCAmelCase = {"""feature-extraction""": TFDPRQuestionEncoder} if is_tf_available() else {}
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : str = TFDPRModelTester(self )
_UpperCAmelCase : Dict = ConfigTester(self ,config_class=_a ,hidden_size=37 )
def _snake_case ( self ) -> Any:
self.config_tester.run_common_tests()
def _snake_case ( self ) -> Any:
_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*_a )
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*_a )
def _snake_case ( self ) -> Tuple:
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*_a )
@slow
def _snake_case ( self ) -> str:
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : List[Any] = TFDPRContextEncoder.from_pretrained(_a )
self.assertIsNotNone(_a )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Optional[Any] = TFDPRContextEncoder.from_pretrained(_a )
self.assertIsNotNone(_a )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : List[str] = TFDPRQuestionEncoder.from_pretrained(_a )
self.assertIsNotNone(_a )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Optional[Any] = TFDPRReader.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_tf
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _snake_case ( self ) -> Dict:
_UpperCAmelCase : Optional[int] = TFDPRQuestionEncoder.from_pretrained("""facebook/dpr-question_encoder-single-nq-base""" )
_UpperCAmelCase : int = tf.constant(
[[101, 7_592, 1_010, 2_003, 2_026, 3_899, 10_140, 1_029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
_UpperCAmelCase : Dict = model(_a )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
_UpperCAmelCase : List[str] = tf.constant(
[
[
0.0323_6253,
0.1275_3335,
0.1681_8509,
0.0027_9786,
0.389_6933,
0.2426_4945,
0.217_8971,
-0.0233_5227,
-0.0848_1959,
-0.1432_4117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() ,expected_slice.numpy() ,atol=1E-4 ) )
| 369 |
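The integration test at the end of the row above boils down to this usage pattern; a hedged sketch that needs TensorFlow and network access to pull the checkpoint (names as used in the test):

from transformers import DPRQuestionEncoderTokenizer, TFDPRQuestionEncoder

tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

inputs = tokenizer("hello, is my dog cute?", return_tensors="tf")
embedding = model(**inputs).pooler_output  # dense question embedding
print(embedding.shape)  # (1, 768)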
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
@slow
@require_torch
def _snake_case ( self ) -> Union[str, Any]:
        bertabert = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""", """prajjwal1/bert-tiny""")
        tokenizer = BertTokenizer.from_pretrained("""bert-base-uncased""")
        bertabert.config.vocab_size = bertabert.config.encoder.vocab_size
        bertabert.config.eos_token_id = tokenizer.sep_token_id
        bertabert.config.decoder_start_token_id = tokenizer.cls_token_id
        bertabert.config.max_length = 128

        train_dataset = datasets.load_dataset("""cnn_dailymail""", """3.0.0""", split="""train[:1%]""")
        val_dataset = datasets.load_dataset("""cnn_dailymail""", """3.0.0""", split="""validation[:1%]""")
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))
        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["""article"""], padding="""max_length""", truncation=True, max_length=512)
            outputs = tokenizer(batch["""highlights"""], padding="""max_length""", truncation=True, max_length=128)
            batch["""input_ids"""] = inputs.input_ids
            batch["""attention_mask"""] = inputs.attention_mask
            batch["""decoder_input_ids"""] = outputs.input_ids
            batch["""labels"""] = outputs.input_ids.copy()
            batch["""labels"""] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
            ]
            batch["""decoder_attention_mask"""] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["""article""", """highlights"""],
        )
        train_dataset.set_format(
            type="""torch""",
            columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["""article""", """highlights"""],
        )
        val_dataset.set_format(
            type="""torch""",
            columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""],
        )

        output_dir = self.get_auto_remove_tmp_dir()
        training_args = SeqaSeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="""steps""",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = SeqaSeqTrainer(
            model=bertabert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
| 349 | 0 |
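The one non-obvious step in the mapping function of the row above is replacing pad token ids with -100 so the cross-entropy loss ignores them. The same trick in isolation:

PAD_ID = 0  # assumed pad id for illustration; real tokenizers expose tokenizer.pad_token_id

labels = [[5, 6, PAD_ID, PAD_ID], [7, PAD_ID, PAD_ID, PAD_ID]]
masked = [[-100 if tok == PAD_ID else tok for tok in seq] for seq in labels]
print(masked)  # [[5, 6, -100, -100], [7, -100, -100, -100]]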
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self ,a_ ,a_=7 ,a_=3 ,a_=18 ,a_=30 ,a_=400 ,a_=True ,a_=None ,a_=True ,) -> str:
_UpperCAmelCase : int = size if size is not None else {"height": 18, "width": 18}
_UpperCAmelCase : List[Any] = parent
_UpperCAmelCase : Tuple = batch_size
_UpperCAmelCase : Optional[int] = num_channels
_UpperCAmelCase : Tuple = image_size
_UpperCAmelCase : Tuple = min_resolution
_UpperCAmelCase : Optional[int] = max_resolution
_UpperCAmelCase : str = do_resize
_UpperCAmelCase : int = size
_UpperCAmelCase : Tuple = apply_ocr
def _snake_case ( self ) -> List[str]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowercase ( _a , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : int = LayoutLMvaImageProcessingTester(self )
@property
def _snake_case ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self ) -> Tuple:
_UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a ,"""do_resize""" ) )
self.assertTrue(hasattr(_a ,"""size""" ) )
self.assertTrue(hasattr(_a ,"""apply_ocr""" ) )
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""height""": 18, """width""": 18} )
_UpperCAmelCase : int = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{"""height""": 42, """width""": 42} )
def _snake_case ( self ) -> int:
pass
def _snake_case ( self ) -> str:
# Initialize image_processing
_UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a ,Image.Image )
# Test not batched input
_UpperCAmelCase : Optional[Any] = image_processing(image_inputs[0] ,return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
self.assertIsInstance(encoding.words ,_a )
self.assertIsInstance(encoding.boxes ,_a )
# Test batched
_UpperCAmelCase : Optional[Any] = image_processing(_a ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
def _snake_case ( self ) -> Dict:
# Initialize image_processing
_UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_a ,numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a ,np.ndarray )
# Test not batched input
_UpperCAmelCase : Optional[int] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
# Test batched
_UpperCAmelCase : List[str] = image_processing(_a ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
def _snake_case ( self ) -> int:
# Initialize image_processing
_UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_a ,torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a ,torch.Tensor )
# Test not batched input
_UpperCAmelCase : List[str] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
# Test batched
_UpperCAmelCase : Dict = image_processing(_a ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
def _snake_case ( self ) -> Optional[Any]:
# with apply_OCR = True
_UpperCAmelCase : Union[str, Any] = LayoutLMvaImageProcessor()
from datasets import load_dataset
_UpperCAmelCase : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_docvqa""" ,split="""test""" )
_UpperCAmelCase : Dict = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
_UpperCAmelCase : List[Any] = image_processing(_a ,return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_UpperCAmelCase : List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
_UpperCAmelCase : List[Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,_a )
self.assertListEqual(encoding.boxes ,_a )
# with apply_OCR = False
_UpperCAmelCase : Any = LayoutLMvaImageProcessor(apply_ocr=_a )
_UpperCAmelCase : Dict = image_processing(_a ,return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
| 370 |
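A hedged usage sketch of the processor exercised in the row above. In released transformers the class is spelled LayoutLMv3ImageProcessor (the row's LayoutLMvaImageProcessor follows this dataset's renaming); OCR needs pytesseract installed, and the image path is an assumption:

from PIL import Image
from transformers import LayoutLMv3ImageProcessor

processor = LayoutLMv3ImageProcessor()  # apply_ocr=True by default
image = Image.open("document.png").convert("RGB")  # assumed input file
encoding = processor(image, return_tensors="pt")
print(encoding.pixel_values.shape)                     # torch.Size([1, 3, 224, 224])
print(len(encoding.words[0]), len(encoding.boxes[0]))  # one OCR word list and box list per image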
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 637_8137.0
AXIS_B = 635_6752.31_4245
EQUATORIAL_RADIUS = 6_378_137


def lamberts_ellipsoidal_distance(lat1, lon1, lat2, lon2) -> float:
    '''simple docstring'''
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 | 0 |
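A quick call of the function restored above, assuming the row's module (and its sibling haversine_distance module) is importable. Coordinates are approximate; the result is in metres:

SAN_FRANCISCO = (37.774856, -122.424227)  # approximate (lat, lon)
NEW_YORK = (40.713019, -74.012647)
print(lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *NEW_YORK))  # roughly 4.1e6 m (about 4,100 km)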
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A_ : Tuple = {
"""configuration_efficientnet""": [
"""EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientNetConfig""",
"""EfficientNetOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["""EfficientNetImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"""EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientNetForImageClassification""",
"""EfficientNetModel""",
"""EfficientNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
A_ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 371 |
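The _LazyModule at the bottom of the row above defers the heavy imports until an attribute is first touched. A tiny, simplified sketch of that idea (not the actual transformers implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # called only when normal lookup fails, i.e. on first access
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(submodule)
                value = getattr(module, attr)
                setattr(self, attr, value)  # cache so later lookups skip __getattr__
                return value
        raise AttributeError(attr)

lazy = LazyModule("demo", {"json": ["dumps"]})  # json stands in for a heavy dependency
print(lazy.dumps({"a": 1}))  # {"a": 1} -- json was imported lazily just now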
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(fnc, x_start, x_end, steps=100) -> float:
    '''simple docstring'''
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x) -> Any:
        '''simple docstring'''
        return x**3 + x**2

    print("""f(x) = x^3 + x^2""")
    print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
    i = 1_0
    while i <= 1_0_0_0_0_0:
        print(f"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
        i *= 1_0
| 349 | 0 |
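A quick sanity check for the integrator above: for a non-negative integrand the result approaches the definite integral, here x^2 over [0, 3], which is exactly 9:

approx = trapezoidal_area(lambda x: x * x, 0, 3, 10_000)
assert abs(approx - 9.0) < 1e-3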
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class lowercase ( __lowerCamelCase ):
"""simple docstring"""
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : int = tempfile.mkdtemp()
_UpperCAmelCase : Union[str, Any] = 8
# DPR tok
_UpperCAmelCase : Union[str, Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_UpperCAmelCase : str = os.path.join(self.tmpdirname ,"""dpr_tokenizer""" )
os.makedirs(__lowercase ,exist_ok=__lowercase )
_UpperCAmelCase : Optional[int] = os.path.join(__lowercase ,DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
_UpperCAmelCase : List[str] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_UpperCAmelCase : Optional[Any] = dict(zip(__lowercase ,range(len(__lowercase ) ) ) )
_UpperCAmelCase : List[Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_UpperCAmelCase : int = {'''unk_token''': '''<unk>'''}
_UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname ,"""bart_tokenizer""" )
os.makedirs(__lowercase ,exist_ok=__lowercase )
_UpperCAmelCase : Union[str, Any] = os.path.join(__lowercase ,BART_VOCAB_FILES_NAMES["""vocab_file"""] )
_UpperCAmelCase : Any = os.path.join(__lowercase ,BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__lowercase ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__lowercase ) )
def _snake_case ( self ) -> Optional[Any]:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""dpr_tokenizer""" ) )
def _snake_case ( self ) -> Dict:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""dpr_tokenizer""" ) )
def _snake_case ( self ) -> List[str]:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""bart_tokenizer""" ) )
def _snake_case ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def _snake_case ( self ) -> int:
_UpperCAmelCase : int = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""text""": ["""foo""", """bar"""],
"""title""": ["""Foo""", """Bar"""],
"""embeddings""": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("""embeddings""" ,string_factory="""Flat""" ,metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def _snake_case ( self ) -> int:
_UpperCAmelCase : Dict = self.get_dummy_dataset()
_UpperCAmelCase : Union[str, Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size ,question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ,)
with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset:
_UpperCAmelCase : Union[str, Any] = dataset
_UpperCAmelCase : str = RagRetriever(
__lowercase ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() ,)
return retriever
def _snake_case ( self ,a_ ) -> Union[str, Any]:
_UpperCAmelCase : Optional[Any] = self.get_dummy_dataset()
_UpperCAmelCase : int = RagConfig(
retrieval_vector_size=self.retrieval_vector_size ,question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ,index_name="""custom""" ,)
if from_disk:
_UpperCAmelCase : List[Any] = os.path.join(self.tmpdirname ,"""dataset""" )
_UpperCAmelCase : Dict = os.path.join(self.tmpdirname ,"""index.faiss""" )
dataset.get_index("""embeddings""" ).save(os.path.join(self.tmpdirname ,"""index.faiss""" ) )
dataset.drop_index("""embeddings""" )
dataset.save_to_disk(os.path.join(self.tmpdirname ,"""dataset""" ) )
del dataset
_UpperCAmelCase : List[Any] = RagRetriever(
__lowercase ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() ,)
else:
_UpperCAmelCase : Dict = RagRetriever(
__lowercase ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() ,index=CustomHFIndex(config.retrieval_vector_size ,__lowercase ) ,)
return retriever
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : Optional[int] = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""text""": ["""foo""", """bar"""],
"""title""": ["""Foo""", """Bar"""],
"""embeddings""": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("""embeddings""" ,string_factory="""Flat""" ,metric_type=faiss.METRIC_INNER_PRODUCT )
_UpperCAmelCase : Dict = os.path.join(self.tmpdirname ,"""hf_bert_base.hnswSQ8_correct_phi_128.c_index""" )
dataset.save_faiss_index("""embeddings""" ,index_file_name + """.index.dpr""" )
pickle.dump(dataset["""id"""] ,open(index_file_name + """.index_meta.dpr""" ,"""wb""" ) )
_UpperCAmelCase : Any = os.path.join(self.tmpdirname ,"""psgs_w100.tsv.pkl""" )
_UpperCAmelCase : Any = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(__lowercase ,open(__lowercase ,"""wb""" ) )
_UpperCAmelCase : Union[str, Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size ,question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ,index_name="""legacy""" ,index_path=self.tmpdirname ,)
_UpperCAmelCase : Any = RagRetriever(
__lowercase ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def _snake_case ( self ) -> Tuple:
_UpperCAmelCase : List[str] = 1
_UpperCAmelCase : Optional[Any] = self.get_dummy_canonical_hf_index_retriever()
_UpperCAmelCase : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
_UpperCAmelCase : Optional[int] = retriever.retrieve(__lowercase ,n_docs=__lowercase )
self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__lowercase ) ,2 )
self.assertEqual(sorted(doc_dicts[0] ) ,["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) ,__lowercase )
self.assertEqual(doc_dicts[0]["""id"""][0] ,"""1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] ,"""0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() ,[[1], [0]] )
def _snake_case ( self ) -> str:
_UpperCAmelCase : Any = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset:
_UpperCAmelCase : Union[str, Any] = self.get_dummy_dataset()
retriever.save_pretrained(__lowercase )
_UpperCAmelCase : int = RagRetriever.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase ,__lowercase )
_UpperCAmelCase : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
_UpperCAmelCase : Optional[int] = retriever.retrieve(__lowercase ,n_docs=1 )
self.assertTrue(out is not None )
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : List[Any] = 1
_UpperCAmelCase : Union[str, Any] = self.get_dummy_custom_hf_index_retriever(from_disk=__lowercase )
_UpperCAmelCase : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
_UpperCAmelCase : List[str] = retriever.retrieve(__lowercase ,n_docs=__lowercase )
self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__lowercase ) ,2 )
self.assertEqual(sorted(doc_dicts[0] ) ,["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) ,__lowercase )
self.assertEqual(doc_dicts[0]["""id"""][0] ,"""1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] ,"""0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() ,[[1], [0]] )
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : Optional[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=__lowercase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__lowercase )
_UpperCAmelCase : List[Any] = RagRetriever.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase ,__lowercase )
_UpperCAmelCase : int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
_UpperCAmelCase : str = retriever.retrieve(__lowercase ,n_docs=1 )
self.assertTrue(out is not None )
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Optional[Any] = 1
_UpperCAmelCase : Optional[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=__lowercase )
_UpperCAmelCase : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
_UpperCAmelCase : List[str] = retriever.retrieve(__lowercase ,n_docs=__lowercase )
self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__lowercase ) ,2 )
self.assertEqual(sorted(doc_dicts[0] ) ,["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) ,__lowercase )
self.assertEqual(doc_dicts[0]["""id"""][0] ,"""1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] ,"""0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() ,[[1], [0]] )
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=__lowercase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__lowercase )
_UpperCAmelCase : Any = RagRetriever.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase ,__lowercase )
_UpperCAmelCase : Dict = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
_UpperCAmelCase : List[Any] = retriever.retrieve(__lowercase ,n_docs=1 )
self.assertTrue(out is not None )
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : Tuple = 1
_UpperCAmelCase : Dict = self.get_dummy_legacy_index_retriever()
_UpperCAmelCase : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
_UpperCAmelCase : int = retriever.retrieve(__lowercase ,n_docs=__lowercase )
self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__lowercase ) ,2 )
self.assertEqual(sorted(doc_dicts[0] ) ,["""text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""text"""] ) ,__lowercase )
self.assertEqual(doc_dicts[0]["""text"""][0] ,"""bar""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""text"""][0] ,"""foo""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() ,[[1], [0]] )
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Optional[Any] = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__lowercase )
_UpperCAmelCase : Tuple = RagRetriever.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase ,__lowercase )
_UpperCAmelCase : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
_UpperCAmelCase : str = retriever.retrieve(__lowercase ,n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def _snake_case ( self ) -> Any:
import torch
_UpperCAmelCase : Any = 1
_UpperCAmelCase : Any = self.get_dummy_canonical_hf_index_retriever()
_UpperCAmelCase : List[Any] = [[5, 7], [10, 11]]
_UpperCAmelCase : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
_UpperCAmelCase : Tuple = retriever(__lowercase ,__lowercase ,prefix=retriever.config.generator.prefix ,n_docs=__lowercase )
_UpperCAmelCase : str = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__lowercase ,__lowercase )
self.assertIsInstance(__lowercase ,__lowercase )
self.assertIsInstance(__lowercase ,np.ndarray )
_UpperCAmelCase : Union[str, Any] = retriever(
__lowercase ,__lowercase ,prefix=retriever.config.generator.prefix ,n_docs=__lowercase ,return_tensors="""pt""" ,)
_UpperCAmelCase : Dict = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__lowercase ,torch.Tensor )
self.assertIsInstance(__lowercase ,torch.Tensor )
self.assertIsInstance(__lowercase ,torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : List[Any] = self.get_dpr_ctx_encoder_tokenizer()
_UpperCAmelCase : List[Any] = 1
_UpperCAmelCase : Any = self.get_dummy_custom_hf_index_retriever(from_disk=__lowercase )
retriever.set_ctx_encoder_tokenizer(__lowercase )
_UpperCAmelCase : List[str] = [[5, 7], [10, 11]]
_UpperCAmelCase : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
_UpperCAmelCase : Tuple = retriever(__lowercase ,__lowercase ,prefix=retriever.config.generator.prefix ,n_docs=__lowercase )
self.assertEqual(
len(__lowercase ) ,6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("""tokenized_doc_ids""", """tokenized_doc_attention_mask""") ) ,__lowercase ) # check for doc token related keys in dictionary.
| 350 |
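For orientation between the assertions above, the core retrieval call sketched with a dummy batch; it presumes a `retriever` built as in get_dummy_canonical_hf_index_retriever(), and the three-way unpacking is an assumption mirroring the shape and key checks in the tests:

import numpy as np
# `retriever` as built by get_dummy_canonical_hf_index_retriever() above.
question_hidden_states = np.array([np.ones(8), -np.ones(8)], dtype=np.float32)
retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(question_hidden_states, n_docs=1)
# retrieved_doc_embeds: (batch, n_docs, retrieval_vector_size); doc_ids: one id list per query;
# doc_dicts: per-query dicts keyed by "embeddings", "id", "text" and "title".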
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    '''simple docstring'''
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
def main():
    '''simple docstring'''
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
| 349 | 0 |
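A typical invocation of the launcher above; the script name and flags are illustrative:

# Run from a shell (illustrative paths/flags):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased
# The launcher rewrites sys.argv to append --tpu_num_cores, and the training
# script must expose an _mp_fn(index) entry point for xmp.spawn to call.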
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
A_ : Tuple = logging.get_logger(__name__)
class lowercase ( __UpperCamelCase ):
"""simple docstring"""
def __init__( self ,*a_ ,**a_ ) -> None:
warnings.warn(
"""The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use DeformableDetrImageProcessor instead.""" ,a_ ,)
super().__init__(*a_ ,**a_ )
| 351 |
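The same deprecation-shim pattern in generic, self-contained form; the class names here are placeholders, not real transformers classes:

import warnings

class NewImageProcessor:
    pass

class OldImageProcessor(NewImageProcessor):
    # Keep the old import path working while warning users toward the replacement.
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldImageProcessor is deprecated; use NewImageProcessor instead.", FutureWarning
        )
        super().__init__(*args, **kwargs)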
'''simple docstring'''
def remove_digit(num: int) -> int:
    '''simple docstring'''
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(
        int("".join(transposition)) for transposition in num_transpositions
    )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 349 | 0 |
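Two worked cases for the digit-removal routine above:

assert remove_digit(152) == 52    # candidates after one removal: 52, 12, 15
assert remove_digit(6385) == 685  # candidates: 385, 685, 635, 638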
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
A_ : Optional[Any] = WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""])
def snake_case_ ( lowerCAmelCase_ )-> int:
'''simple docstring'''
_UpperCAmelCase : Dict = test_results.split(""" """ )
_UpperCAmelCase : int = 0
_UpperCAmelCase : Any = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
_UpperCAmelCase : str = expressions[-2] if """=""" in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowerCAmelCase_ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def snake_case_ ( lowerCAmelCase_ )-> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = {}
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : Dict = False
for line in failures_short_lines.split("""\n""" ):
if re.search(R"""_ \[doctest\]""" , lowerCAmelCase_ ):
_UpperCAmelCase : List[str] = True
_UpperCAmelCase : int = line.split(""" """ )[2]
elif in_error and not line.split(""" """ )[0].isdigit():
_UpperCAmelCase : Dict = line
_UpperCAmelCase : Optional[int] = False
return failures
class lowercase :
"""simple docstring"""
def __init__( self ,a_ ,a_ ) -> int:
_UpperCAmelCase : Optional[Any] = title
_UpperCAmelCase : Optional[Any] = doc_test_results["""time_spent"""].split(""",""" )[0]
_UpperCAmelCase : List[Any] = doc_test_results["""success"""]
_UpperCAmelCase : Tuple = doc_test_results["""failures"""]
_UpperCAmelCase : List[Any] = self.n_success + self.n_failures
# Failures and success of the modeling tests
_UpperCAmelCase : int = doc_test_results
@property
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : List[str] = [self._time_spent]
_UpperCAmelCase : str = 0
for time in time_spent:
_UpperCAmelCase : int = time.split(""":""" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(__SCREAMING_SNAKE_CASE ) == 1:
_UpperCAmelCase : str = [0, 0, time_parts[0]]
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : str = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3_600 + minutes * 60 + seconds
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : int = total_secs // 3_600, (total_secs % 3_600) // 60, total_secs % 60
return f'''{int(__SCREAMING_SNAKE_CASE )}h{int(__SCREAMING_SNAKE_CASE )}m{int(__SCREAMING_SNAKE_CASE )}s'''
@property
def _snake_case ( self ) -> Optional[Any]:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _snake_case ( self ) -> Optional[Any]:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
def _snake_case ( self ) -> Union[str, Any]:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
f''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : Tuple = 40
_UpperCAmelCase : int = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )}
_UpperCAmelCase : Any = """"""
for category, failures in category_failures.items():
if len(__SCREAMING_SNAKE_CASE ) == 0:
continue
if report != "":
report += "\n\n"
report += f'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(__SCREAMING_SNAKE_CASE )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
def _snake_case ( self ) -> Tuple:
_UpperCAmelCase : List[str] = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(__SCREAMING_SNAKE_CASE )
@staticmethod
def _snake_case ( ) -> str:
_UpperCAmelCase : List[Any] = [
{
"""type""": """section""",
"""text""": {
"""type""": """plain_text""",
"""text""": """There was an issue running the tests.""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
"""url""": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
]
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(__SCREAMING_SNAKE_CASE )} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] ,text="""There was an issue running the tests.""" ,blocks=__SCREAMING_SNAKE_CASE ,)
def _snake_case ( self ) -> Any:
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(self.payload )} ) )
_UpperCAmelCase : Optional[Any] = f'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else """All tests passed."""
_UpperCAmelCase : Tuple = client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] ,blocks=self.payload ,text=__SCREAMING_SNAKE_CASE ,)
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ) -> Dict:
_UpperCAmelCase : Dict = """"""
for key, value in failures.items():
_UpperCAmelCase : Optional[int] = value[:200] + """ [Truncated]""" if len(__SCREAMING_SNAKE_CASE ) > 250 else value
failures_text += f'''*{key}*\n_{value}_\n\n'''
_UpperCAmelCase : Dict = job_name
_UpperCAmelCase : int = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}}
if job_link is not None:
_UpperCAmelCase : List[Any] = {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True},
"""url""": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _snake_case ( self ) -> Union[str, Any]:
if self.thread_ts is None:
raise ValueError("""Can only post reply if a post has been made.""" )
_UpperCAmelCase : Union[str, Any] = self.doc_test_results.pop("""job_link""" )
self.doc_test_results.pop("""failures""" )
self.doc_test_results.pop("""success""" )
self.doc_test_results.pop("""time_spent""" )
        _UpperCAmelCase : Tuple = sorted(self.doc_test_results.items() ,key=lambda t : t[0] )
for job, job_result in sorted_dict:
if len(job_result["""failures"""] ):
_UpperCAmelCase : List[Any] = f'''*Num failures* :{len(job_result['failed'] )} \n'''
_UpperCAmelCase : Any = job_result["""failures"""]
_UpperCAmelCase : List[str] = self.get_reply_blocks(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,text=__SCREAMING_SNAKE_CASE )
print("""Sending the following reply""" )
print(json.dumps({"""blocks""": blocks} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] ,text=f'''Results for {job}''' ,blocks=__SCREAMING_SNAKE_CASE ,thread_ts=self.thread_ts["""ts"""] ,)
time.sleep(1 )
def snake_case_ ( )-> Tuple:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = os.environ["""GITHUB_RUN_ID"""]
_UpperCAmelCase : str = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
_UpperCAmelCase : str = requests.get(lowerCAmelCase_ ).json()
_UpperCAmelCase : Any = {}
try:
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_UpperCAmelCase : Optional[Any] = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(lowerCAmelCase_ ):
_UpperCAmelCase : Union[str, Any] = requests.get(url + F'''&page={i + 2}''' ).json()
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return jobs
except Exception as e:
print("""Unknown error, could not fetch links.""" , lowerCAmelCase_ )
return {}
def snake_case_ ( lowerCAmelCase_ )-> str:
'''simple docstring'''
_UpperCAmelCase : Dict = {}
if os.path.exists(lowerCAmelCase_ ):
_UpperCAmelCase : Tuple = os.listdir(lowerCAmelCase_ )
for file in files:
try:
with open(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , encoding="""utf-8""" ) as f:
_UpperCAmelCase : List[Any] = f.read()
except UnicodeDecodeError as e:
raise ValueError(F'''Could not open {os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )}.''' ) from e
return _artifact
def snake_case_ ( )-> Optional[int]:
'''simple docstring'''
class lowercase :
"""simple docstring"""
def __init__( self ,a_ ) -> List[Any]:
_UpperCAmelCase : Any = name
_UpperCAmelCase : Tuple = []
def __str__( self ) -> Tuple:
return self.name
def _snake_case ( self ,a_ ) -> Optional[int]:
self.paths.append({"""name""": self.name, """path""": path} )
_UpperCAmelCase : Union[str, Any] = {}
_UpperCAmelCase : Dict = filter(os.path.isdir , os.listdir() )
for directory in directories:
_UpperCAmelCase : List[Any] = directory
if artifact_name not in _available_artifacts:
_UpperCAmelCase : Any = Artifact(lowerCAmelCase_ )
_available_artifacts[artifact_name].add_path(lowerCAmelCase_ )
return _available_artifacts
if __name__ == "__main__":
A_ : List[Any] = get_job_links()
A_ : Dict = retrieve_available_artifacts()
A_ : Dict = collections.OrderedDict(
[
("""*.py""", """API Examples"""),
("""*.md""", """MD Examples"""),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
A_ : Union[str, Any] = {
v: {
"""failed""": [],
"""failures""": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
A_ : List[Any] = github_actions_job_links.get("""run_doctests""")
A_ : Union[str, Any] = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
A_ : str = retrieve_artifact(artifact_path["""name"""])
if "stats" in artifact:
A_ : Dict = handle_test_results(artifact["""stats"""])
A_ : Optional[int] = failed
A_ : Any = success
A_ : str = time_spent[1:-1] + """, """
A_ : str = extract_first_line_failure(artifact["""failures_short"""])
for line in artifact["summary_short"].split("""\n"""):
if re.search("""FAILED""", line):
A_ : Tuple = line.replace("""FAILED """, """""")
A_ : int = line.split()[0].replace("""\n""", """""")
if "::" in line:
A_ : Optional[int] = line.split("""::""")
else:
A_ : Union[str, Any] = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
A_ : List[Any] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
A_ : str = all_failures[test] if test in all_failures else """N/A"""
A_ : Optional[int] = failure
break
A_ : Dict = Message("""🤗 Results of the doc tests.""", doc_test_results)
message.post()
message.post_reply()
| 352 |
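The stats parser at the top of the script consumes pytest summary strings; assuming the first helper were named handle_test_results (the name is hypothetical, the logic matches the code above), a representative round trip:

# Hypothetical name for the first helper defined above.
failed, success, time_spent = handle_test_results("= 1 failed, 2 passed in 3.45s =")
assert (failed, success, time_spent) == (1, 2, "3.45s")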
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
A_ : Dict = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    '''simple docstring'''
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
1_2: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 1_1],
4: [0, 4, 8, 1_1],
6: [0, 2, 4, 7, 9, 1_1],
9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1],
1_2: list(range(1_2)),
},
1_6: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 1_5],
3: [0, 8, 1_5],
4: [0, 5, 1_0, 1_5],
6: [0, 3, 6, 9, 1_2, 1_5],
8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5],
9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5],
1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5],
1_6: list(range(1_6)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]},
1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]},
}
def pick_layers_to_copy(n_student: int, n_teacher: int) -> List[int]:
    '''simple docstring'''
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student: int, n_teacher: int) -> List[int]:
    '''simple docstring'''
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ = "student" , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_=False , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , )-> Tuple[PreTrainedModel, List[int], List[int]]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = """encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."""
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
AutoTokenizer.from_pretrained(lowerCAmelCase_ ).save_pretrained(lowerCAmelCase_ ) # purely for convenience
_UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase_ ).eval()
else:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), F'''teacher must be a model or string got type {type(lowerCAmelCase_ )}'''
_UpperCAmelCase : str = teacher.config.to_diff_dict()
try:
_UpperCAmelCase ,_UpperCAmelCase : Optional[int] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
_UpperCAmelCase : Tuple = teacher_e
if d is None:
_UpperCAmelCase : Dict = teacher_d
init_kwargs.update({"""encoder_layers""": e, """decoder_layers""": d} )
except AttributeError: # T5
if hasattr(teacher.config , """num_encoder_layers""" ):
_UpperCAmelCase ,_UpperCAmelCase : int = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
_UpperCAmelCase ,_UpperCAmelCase : int = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
_UpperCAmelCase : List[str] = teacher_e
if d is None:
_UpperCAmelCase : str = teacher_d
if hasattr(teacher.config , """num_encoder_layers""" ):
init_kwargs.update({"""num_encoder_layers""": e, """num_decoder_layers""": d} )
else:
init_kwargs.update({"""num_layers""": e, """num_decoder_layers""": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCAmelCase_ )
# Copy weights
_UpperCAmelCase : Any = teacher.config_class(**lowerCAmelCase_ )
_UpperCAmelCase : Optional[Any] = AutoModelForSeqaSeqLM.from_config(lowerCAmelCase_ )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
_UpperCAmelCase : Optional[Any] = student.load_state_dict(teacher.state_dict() , strict=lowerCAmelCase_ )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
_UpperCAmelCase ,_UpperCAmelCase : Optional[Any] = list(range(lowerCAmelCase_ ) ), list(range(lowerCAmelCase_ ) )
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'''
F''' {save_path}''' )
student.save_pretrained(lowerCAmelCase_ )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
_UpperCAmelCase : List[int] = pick_layers_to_copy(lowerCAmelCase_ , lowerCAmelCase_ )
if d_layers_to_copy is None:
_UpperCAmelCase : List[int] = pick_layers_to_copy(lowerCAmelCase_ , lowerCAmelCase_ )
try:
if hasattr(
lowerCAmelCase_ , """prophetnet""" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCAmelCase_ )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCAmelCase_ )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCAmelCase_ )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCAmelCase_ )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCAmelCase_ )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCAmelCase_ )
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''' )
_UpperCAmelCase : Dict = {
"""teacher_type""": teacher.config.model_type,
"""copied_encoder_layers""": e_layers_to_copy,
"""copied_decoder_layers""": d_layers_to_copy,
}
student.save_pretrained(lowerCAmelCase_ )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 349 | 0 |
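Two concrete readings of the copy maps above, given the bindings in the fixed script: distilling a 12-layer teacher into a 3-layer student copies teacher layers 0, 6 and 11, and a same-size student copies everything.

assert LAYERS_TO_COPY[12][3] == [0, 6, 11]
assert pick_layers_to_copy(n_student=3, n_teacher=12) == [0, 6, 11]
# With no hardcoded entry, pick_layers_to_copy warns and copies the first n_student layers.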
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ : Optional[Any] = logging.get_logger(__name__)
A_ : int = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all BART models at https://huggingface.co/models?filter=bart
A_ : List[Any] = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
A_ : Optional[int] = {
"""facebook/bart-base""": 1_0_2_4,
"""facebook/bart-large""": 1_0_2_4,
"""facebook/bart-large-mnli""": 1_0_2_4,
"""facebook/bart-large-cnn""": 1_0_2_4,
"""facebook/bart-large-xsum""": 1_0_2_4,
"""yjernite/bart_eli5""": 1_0_2_4,
}
@lru_cache()
def snake_case_ ( )-> Dict:
'''simple docstring'''
_UpperCAmelCase : int = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
_UpperCAmelCase : int = bs[:]
_UpperCAmelCase : Union[str, Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCamelCase__ )
cs.append(2**8 + n )
n += 1
_UpperCAmelCase : str = [chr(lowerCamelCase__ ) for n in cs]
return dict(zip(lowerCamelCase__ , lowerCamelCase__ ) )
def snake_case_ ( lowerCAmelCase_ )-> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = set()
_UpperCAmelCase : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_UpperCAmelCase : Dict = char
return pairs
class lowercase ( snake_case_ ):
"""simple docstring"""
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ["""input_ids""", """attention_mask"""]
def __init__( self ,a_ ,a_ ,a_="replace" ,a_="<s>" ,a_="</s>" ,a_="</s>" ,a_="<s>" ,a_="<unk>" ,a_="<pad>" ,a_="<mask>" ,a_=False ,**a_ ,) -> Dict:
_UpperCAmelCase : Any = AddedToken(a_ ,lstrip=a_ ,rstrip=a_ ) if isinstance(a_ ,a_ ) else bos_token
_UpperCAmelCase : Any = AddedToken(a_ ,lstrip=a_ ,rstrip=a_ ) if isinstance(a_ ,a_ ) else eos_token
_UpperCAmelCase : Optional[Any] = AddedToken(a_ ,lstrip=a_ ,rstrip=a_ ) if isinstance(a_ ,a_ ) else sep_token
_UpperCAmelCase : str = AddedToken(a_ ,lstrip=a_ ,rstrip=a_ ) if isinstance(a_ ,a_ ) else cls_token
_UpperCAmelCase : Any = AddedToken(a_ ,lstrip=a_ ,rstrip=a_ ) if isinstance(a_ ,a_ ) else unk_token
_UpperCAmelCase : Tuple = AddedToken(a_ ,lstrip=a_ ,rstrip=a_ ) if isinstance(a_ ,a_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_UpperCAmelCase : Tuple = AddedToken(a_ ,lstrip=a_ ,rstrip=a_ ) if isinstance(a_ ,a_ ) else mask_token
super().__init__(
errors=a_ ,bos_token=a_ ,eos_token=a_ ,unk_token=a_ ,sep_token=a_ ,cls_token=a_ ,pad_token=a_ ,mask_token=a_ ,add_prefix_space=a_ ,**a_ ,)
with open(a_ ,encoding="""utf-8""" ) as vocab_handle:
_UpperCAmelCase : Tuple = json.load(a_ )
_UpperCAmelCase : int = {v: k for k, v in self.encoder.items()}
_UpperCAmelCase : List[str] = errors # how to handle errors in decoding
_UpperCAmelCase : Optional[Any] = bytes_to_unicode()
_UpperCAmelCase : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(a_ ,encoding="""utf-8""" ) as merges_handle:
_UpperCAmelCase : Any = merges_handle.read().split("""\n""" )[1:-1]
_UpperCAmelCase : Tuple = [tuple(merge.split() ) for merge in bpe_merges]
_UpperCAmelCase : Optional[Any] = dict(zip(a_ ,range(len(a_ ) ) ) )
_UpperCAmelCase : List[Any] = {}
_UpperCAmelCase : int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_UpperCAmelCase : str = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def _snake_case ( self ) -> List[Any]:
return len(self.encoder )
def _snake_case ( self ) -> Union[str, Any]:
return dict(self.encoder ,**self.added_tokens_encoder )
def _snake_case ( self ,a_ ) -> Optional[int]:
if token in self.cache:
return self.cache[token]
_UpperCAmelCase : List[str] = tuple(a_ )
_UpperCAmelCase : int = get_pairs(a_ )
if not pairs:
return token
while True:
_UpperCAmelCase : str = min(a_ ,key=lambda a_ : self.bpe_ranks.get(a_ ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_UpperCAmelCase ,_UpperCAmelCase : Tuple = bigram
_UpperCAmelCase : Union[str, Any] = []
_UpperCAmelCase : str = 0
while i < len(a_ ):
try:
_UpperCAmelCase : List[Any] = word.index(a_ ,a_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_UpperCAmelCase : Union[str, Any] = j
if word[i] == first and i < len(a_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_UpperCAmelCase : Dict = tuple(a_ )
_UpperCAmelCase : List[str] = new_word
if len(a_ ) == 1:
break
else:
_UpperCAmelCase : str = get_pairs(a_ )
_UpperCAmelCase : int = """ """.join(a_ )
_UpperCAmelCase : Optional[int] = word
return word
def _snake_case ( self ,a_ ) -> Any:
_UpperCAmelCase : Optional[int] = []
for token in re.findall(self.pat ,a_ ):
_UpperCAmelCase : Dict = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(a_ ).split(""" """ ) )
return bpe_tokens
def _snake_case ( self ,a_ ) -> str:
return self.encoder.get(a_ ,self.encoder.get(self.unk_token ) )
def _snake_case ( self ,a_ ) -> Optional[int]:
return self.decoder.get(a_ )
def _snake_case ( self ,a_ ) -> Optional[int]:
_UpperCAmelCase : Union[str, Any] = """""".join(a_ )
_UpperCAmelCase : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def _snake_case ( self ,a_ ,a_ = None ) -> Tuple[str]:
if not os.path.isdir(a_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase : Union[str, Any] = os.path.join(
a_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_UpperCAmelCase : Optional[int] = os.path.join(
a_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(a_ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=a_ ,ensure_ascii=a_ ) + """\n""" )
_UpperCAmelCase : Optional[Any] = 0
with open(a_ ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
_UpperCAmelCase : Optional[Any] = token_index
writer.write(""" """.join(a_ ) + """\n""" )
index += 1
return vocab_file, merge_file
def _snake_case ( self ,a_ ,a_ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase : int = [self.cls_token_id]
_UpperCAmelCase : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self ,a_ ,a_ = None ,a_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ ,token_ids_a=a_ ,already_has_special_tokens=a_ )
if token_ids_a is None:
return [1] + ([0] * len(a_ )) + [1]
return [1] + ([0] * len(a_ )) + [1, 1] + ([0] * len(a_ )) + [1]
def _snake_case ( self ,a_ ,a_ = None ) -> List[int]:
_UpperCAmelCase : Union[str, Any] = [self.sep_token_id]
_UpperCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self ,a_ ,a_=False ,**a_ ) -> List[str]:
_UpperCAmelCase : str = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(a_ ) > 0 and not text[0].isspace()):
_UpperCAmelCase : Any = """ """ + text
return (text, kwargs)
| 353 |
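A self-contained miniature of the merge loop inside the bpe method above: repeatedly fuse the adjacent pair with the lowest merge rank until no ranked pair remains. The helper name and toy ranks are illustrative.

def bpe_merge(word: str, ranks: dict) -> tuple:
    symbols = tuple(word)
    while len(symbols) > 1:
        pairs = {(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)}
        best = min(pairs, key=lambda pair: ranks.get(pair, float("inf")))
        if best not in ranks:
            break  # no known merge applies
        first, second = best
        merged = []
        i = 0
        while i < len(symbols):
            if i < len(symbols) - 1 and symbols[i] == first and symbols[i + 1] == second:
                merged.append(first + second)
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = tuple(merged)
    return symbols

assert bpe_merge("low", {("l", "o"): 0, ("lo", "w"): 1}) == ("low",)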
'''simple docstring'''
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    '''simple docstring'''
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 | 0 |
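Two worked calls for the recursive two-pointer search above:

assert search([1, 2, 4, 8, 16], 8) == 3
assert search([1, 2, 4, 8, 16], 7) == -1  # both ends exhausted without a match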
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
A_ = get_tests_dir("""fixtures""")
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Tuple:
_UpperCAmelCase : List[str] = mock.Mock()
_UpperCAmelCase : List[Any] = 500
_UpperCAmelCase : int = {}
_UpperCAmelCase : Union[str, Any] = HTTPError
_UpperCAmelCase : List[Any] = {}
# Download this model to make sure it's in the cache.
_UpperCAmelCase : Dict = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" ,return_value=_lowerCAmelCase ) as mock_head:
_UpperCAmelCase : Dict = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" )
# This check we did call the fake head request
mock_head.assert_called()
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json""" )
@is_staging_test
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def _snake_case ( cls ) -> Dict:
_UpperCAmelCase : Optional[Any] = TOKEN
HfFolder.save_token(_lowerCAmelCase )
@classmethod
def _snake_case ( cls ) -> List[str]:
try:
delete_repo(token=cls._token ,repo_id="""test-feature-extractor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""valid_org/test-feature-extractor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""test-dynamic-feature-extractor""" )
except HTTPError:
pass
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained(_lowerCAmelCase )
feature_extractor.push_to_hub("""test-feature-extractor""" ,use_auth_token=self._token )
_UpperCAmelCase : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCAmelCase ,getattr(_lowerCAmelCase ,_lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token ,repo_id="""test-feature-extractor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_lowerCAmelCase ,repo_id="""test-feature-extractor""" ,push_to_hub=_lowerCAmelCase ,use_auth_token=self._token )
_UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCAmelCase ,getattr(_lowerCAmelCase ,_lowerCAmelCase ) )
def _snake_case ( self ) -> Dict:
_UpperCAmelCase : str = WavaVecaFeatureExtractor.from_pretrained(_lowerCAmelCase )
feature_extractor.push_to_hub("""valid_org/test-feature-extractor""" ,use_auth_token=self._token )
_UpperCAmelCase : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCAmelCase ,getattr(_lowerCAmelCase ,_lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token ,repo_id="""valid_org/test-feature-extractor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_lowerCAmelCase ,repo_id="""valid_org/test-feature-extractor-org""" ,push_to_hub=_lowerCAmelCase ,use_auth_token=self._token )
_UpperCAmelCase : int = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor-org""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCAmelCase ,getattr(_lowerCAmelCase ,_lowerCAmelCase ) )
def _snake_case ( self ) -> Union[str, Any]:
CustomFeatureExtractor.register_for_auto_class()
_UpperCAmelCase : Optional[Any] = CustomFeatureExtractor.from_pretrained(_lowerCAmelCase )
feature_extractor.push_to_hub("""test-dynamic-feature-extractor""" ,use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map ,{"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor"""} ,)
_UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
f'''{USER}/test-dynamic-feature-extractor''' ,trust_remote_code=_lowerCAmelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ ,"""CustomFeatureExtractor""" )
| 354 |
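The push/pull round trip these tests assert, reduced to its core; the repo id is illustrative, TOKEN and USER are the staging credentials imported above, and the final dict comparison is a sketch of the per-attribute checks in the tests:

extractor = WavaVecaFeatureExtractor.from_pretrained(get_tests_dir("fixtures"))
extractor.push_to_hub("test-feature-extractor", use_auth_token=TOKEN)
reloaded = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
assert reloaded.to_dict() == extractor.to_dict()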
'''simple docstring'''
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    '''simple docstring'''
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 349 | 0 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    '''simple docstring'''
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    '''simple docstring'''
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 355 |
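A worked call for the solver above: the strictly diagonally dominant system 4x + y = 2, x + 3y = -6 has exact solution x = 12/11, y = -26/11, which the iterates approach.

coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
constant = np.array([[2.0], [-6.0]])
print(jacobi_iteration_method(coefficient, constant, init_val=[0.0, 0.0], iterations=50))
# -> approximately [1.0909, -2.3636]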
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : List[str] = 1
_UpperCAmelCase : List[str] = 3
_UpperCAmelCase : Union[str, Any] = (32, 32)
_UpperCAmelCase : str = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(a_ )
return image
@property
def _snake_case ( self ) -> List[Any]:
torch.manual_seed(0 )
_UpperCAmelCase : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
return model
@property
def _snake_case ( self ) -> Optional[int]:
torch.manual_seed(0 )
_UpperCAmelCase : str = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
def _snake_case ( self ) -> Dict:
torch.manual_seed(0 )
_UpperCAmelCase : Any = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,)
return CLIPTextModel(a_ )
@property
def _snake_case ( self ) -> Union[str, Any]:
def extract(*a_ ,**a_ ):
class lowercase :
"""simple docstring"""
def __init__( self ) -> Any:
_UpperCAmelCase : str = torch.ones([0] )
def _snake_case ( self ,a_ ) -> Any:
self.pixel_values.to(a_ )
return self
return Out()
return extract
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Union[str, Any] = self.dummy_cond_unet
_UpperCAmelCase : int = DDIMScheduler(
beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=a_ ,set_alpha_to_one=a_ ,)
_UpperCAmelCase : Optional[int] = self.dummy_vae
_UpperCAmelCase : Optional[int] = self.dummy_text_encoder
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        # assemble the pipeline with the DDIM scheduler created above
_UpperCAmelCase : int = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : Optional[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Union[str, Any] = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : Optional[int] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : str = sd_pipe([prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
_UpperCAmelCase : int = output.images
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : str = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=a_ ,)[0]
_UpperCAmelCase : str = image[0, -3:, -3:, -1]
_UpperCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Tuple = self.dummy_cond_unet
_UpperCAmelCase : Optional[int] = PNDMScheduler(skip_prk_steps=a_ )
_UpperCAmelCase : int = self.dummy_vae
_UpperCAmelCase : int = self.dummy_text_encoder
_UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : str = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : str = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : int = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : Any = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : List[Any] = sd_pipe([prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
_UpperCAmelCase : Dict = output.images
_UpperCAmelCase : List[Any] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : Any = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=a_ ,)[0]
_UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase : Union[str, Any] = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : Optional[int] = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=a_ )
assert isinstance(a_ ,a_ )
assert isinstance(pipe.scheduler ,a_ )
assert pipe.safety_checker is None
_UpperCAmelCase : Dict = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained(a_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_UpperCAmelCase : Union[str, Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def _snake_case ( self ) -> str:
_UpperCAmelCase : Optional[int] = self.dummy_cond_unet
_UpperCAmelCase : str = PNDMScheduler(skip_prk_steps=a_ )
_UpperCAmelCase : List[str] = self.dummy_vae
_UpperCAmelCase : int = self.dummy_text_encoder
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
_UpperCAmelCase : str = unet.half()
_UpperCAmelCase : List[str] = vae.half()
_UpperCAmelCase : Dict = bert.half()
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : Dict = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : List[str] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : str = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : int = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> str:
_UpperCAmelCase : List[str] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=a_ )
_UpperCAmelCase : Dict = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_UpperCAmelCase : int = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : List[Any] = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
_UpperCAmelCase : Any = 4_003_660_346
_UpperCAmelCase : List[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
_UpperCAmelCase : int = torch.manual_seed(a_ )
_UpperCAmelCase : str = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : str = output.images
_UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[str] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with safety guidance (strong configuration)
_UpperCAmelCase : List[str] = torch.manual_seed(a_ )
_UpperCAmelCase : Optional[Any] = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : List[str] = output.images
_UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[str] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> int:
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=a_ )
_UpperCAmelCase : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_UpperCAmelCase : Union[str, Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Any = """padme amidala taking a bath artwork, safe for work, no nudity"""
_UpperCAmelCase : Optional[Any] = 2_734_971_755
_UpperCAmelCase : Optional[int] = 7
_UpperCAmelCase : int = torch.manual_seed(a_ )
_UpperCAmelCase : int = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : Optional[int] = output.images
_UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
_UpperCAmelCase : Optional[int] = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
_UpperCAmelCase : Optional[int] = torch.manual_seed(a_ )
_UpperCAmelCase : int = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : Union[str, Any] = output.images
_UpperCAmelCase : Any = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
_UpperCAmelCase : List[str] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Optional[int] = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
_UpperCAmelCase : Dict = 1_044_355_234
_UpperCAmelCase : int = 12
_UpperCAmelCase : Optional[Any] = torch.manual_seed(a_ )
_UpperCAmelCase : List[str] = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : List[str] = output.images
_UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
_UpperCAmelCase : Dict = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
_UpperCAmelCase : Tuple = torch.manual_seed(a_ )
_UpperCAmelCase : Dict = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : Optional[Any] = output.images
_UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
_UpperCAmelCase : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
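
# The fixed-seed reproducibility pattern these tests rely on, reduced to a
# standalone sketch: seed a generator, run the same computation twice, and
# compare a small output slice. `fake_pipeline` is an illustrative stand-in
# for any seeded callable, not a diffusers API.
def fake_pipeline(generator: torch.Generator) -> np.ndarray:
    return torch.rand((1, 64, 64, 3), generator=generator).numpy()

_first = fake_pipeline(torch.Generator().manual_seed(0))
_second = fake_pipeline(torch.Generator().manual_seed(0))
assert np.abs(_first[0, -3:, -3:, -1] - _second[0, -3:, -3:, -1]).max() < 1E-2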
| 349 | 0 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
A_ : Optional[Any] = """"""
A_ : str = """"""
A_ : str = """"""
A_ : Tuple = """"""
def snake_case_ ( lowerCAmelCase_ )-> List[str]:
'''simple docstring'''
_UpperCAmelCase : Dict = tweepy.OAuthHandler(lowerCAmelCase_ , lowerCAmelCase_ )
auth.set_access_token(lowerCAmelCase_ , lowerCAmelCase_ )
_UpperCAmelCase : List[str] = tweepy.API(lowerCAmelCase_ )
# initialize a list to hold all the tweepy Tweets
_UpperCAmelCase : List[str] = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_UpperCAmelCase : int = api.user_timeline(screen_name=lowerCAmelCase_ , count=200 )
# save most recent tweets
alltweets.extend(lowerCAmelCase_ )
# save the id of the oldest tweet less one
_UpperCAmelCase : List[Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowerCAmelCase_ ) > 0:
print(F'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
_UpperCAmelCase : Optional[int] = api.user_timeline(
screen_name=lowerCAmelCase_ , count=200 , max_id=lowerCAmelCase_ )
# save most recent tweets
alltweets.extend(lowerCAmelCase_ )
# update the id of the oldest tweet less one
_UpperCAmelCase : str = alltweets[-1].id - 1
print(F'''...{len(lowerCAmelCase_ )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
_UpperCAmelCase : Union[str, Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F'''new_{screen_name}_tweets.csv''' , """w""" ) as f:
_UpperCAmelCase : List[Any] = csv.writer(lowerCAmelCase_ )
writer.writerow(["""id""", """created_at""", """text"""] )
writer.writerows(lowerCAmelCase_ )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
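
# The manual max_id loop above can also be written with tweepy's Cursor
# pagination helper. A hedged sketch of the same download (valid credentials
# are required on `api_client`; the function name here is illustrative):
def get_all_tweets_with_cursor(api_client, screen_name):
    rows = []
    for tweet in tweepy.Cursor(api_client.user_timeline, screen_name=screen_name, count=200).items():
        rows.append([tweet.id_str, tweet.created_at, tweet.text])
    return rows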
| 356 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A_ : str = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[int] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
A_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
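
# What the lazy-module indirection buys, as a standalone toy (not the
# transformers implementation): attribute access triggers the import, so a
# heavy backend is loaded only when it is actually used.
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, name_to_module):
        super().__init__(name)
        self._name_to_module = name_to_module  # attribute name -> module path
    def __getattr__(self, item):
        module = importlib.import_module(self._name_to_module[item])
        return getattr(module, item)

_lazy_demo = _LazyModuleSketch("lazy_demo", {"sqrt": "math", "dataclass": "dataclasses"})
assert _lazy_demo.sqrt(9) == 3.0  # "math" is imported only at this point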
| 349 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
A_ : List[Any] = logging.get_logger(__name__)
def snake_case_ ( lowerCAmelCase_ )-> Dict:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , np.ndarray ):
return list(tensor.shape )
_UpperCAmelCase : List[Any] = tf.shape(lowerCAmelCase_ )
if tensor.shape == tf.TensorShape(lowerCAmelCase_ ):
return dynamic
_UpperCAmelCase : Optional[Any] = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(lowerCAmelCase_ )]
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None )-> Optional[Any]:
'''simple docstring'''
return tf.nn.softmax(logits=logits + 1e-9 , axis=lowerCAmelCase_ , name=lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1e-5 , lowerCAmelCase_=-1 )-> List[Any]:
'''simple docstring'''
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" )
# Get mean and variance on the axis to be normalized
_UpperCAmelCase : Dict = tf.nn.moments(lowerCAmelCase_ , axes=[axis] , keepdims=lowerCAmelCase_ )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
_UpperCAmelCase : Tuple = [1] * inputs.shape.rank
_UpperCAmelCase : List[str] = shape_list(lowerCAmelCase_ )[axis]
_UpperCAmelCase : Union[str, Any] = tf.reshape(lowerCAmelCase_ , lowerCAmelCase_ )
_UpperCAmelCase : int = tf.reshape(lowerCAmelCase_ , lowerCAmelCase_ )
# Compute layer normalization using the batch_normalization
# function.
_UpperCAmelCase : List[Any] = tf.nn.batch_normalization(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , offset=lowerCAmelCase_ , scale=lowerCAmelCase_ , variance_epsilon=lowerCAmelCase_ , )
return outputs
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_=0 , lowerCAmelCase_=-1 )-> Tuple:
'''simple docstring'''
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
_UpperCAmelCase : Union[str, Any] = tf.shape(lowerCAmelCase_ )
_UpperCAmelCase : Optional[Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
_UpperCAmelCase : Tuple = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(lowerCAmelCase_ , lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ )-> Optional[Any]:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , tf.Tensor ):
_UpperCAmelCase : int = tf.convert_to_tensor(lowerCAmelCase_ ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
_UpperCAmelCase : List[Any] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
_UpperCAmelCase : Any = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
_UpperCAmelCase : Dict = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = "input_ids" )-> Tuple:
'''simple docstring'''
tf.debugging.assert_less(
lowerCAmelCase_ , tf.cast(lowerCAmelCase_ , dtype=tensor.dtype ) , message=(
F'''The maximum value of {tensor_name} ({tf.math.reduce_max(lowerCAmelCase_ )}) must be smaller than the embedding '''
F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
) , )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = 64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
_UpperCAmelCase : List[str] = [x for x in data if len(lowerCAmelCase_ ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
"""The following attributes cannot be saved to HDF5 file because """
F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
F'''bytes: {bad_attributes}''' )
_UpperCAmelCase : Optional[Any] = np.asarray(lowerCAmelCase_ )
_UpperCAmelCase : Any = 1
_UpperCAmelCase : Tuple = np.array_split(lowerCAmelCase_ , lowerCAmelCase_ )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
_UpperCAmelCase : Any = np.array_split(lowerCAmelCase_ , lowerCAmelCase_ )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(lowerCAmelCase_ ):
_UpperCAmelCase : Optional[int] = chunk_data
else:
_UpperCAmelCase : List[Any] = data
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> Dict:
'''simple docstring'''
if name in group.attrs:
_UpperCAmelCase : List[Any] = [n.decode("""utf8""" ) if hasattr(lowerCAmelCase_ , """decode""" ) else n for n in group.attrs[name]]
else:
_UpperCAmelCase : Optional[int] = []
_UpperCAmelCase : Union[str, Any] = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode("""utf8""" ) if hasattr(lowerCAmelCase_ , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] )
chunk_id += 1
return data
def snake_case_ ( lowerCAmelCase_ )-> Optional[int]:
'''simple docstring'''
def _expand_single_ad_tensor(lowerCAmelCase_ ):
if isinstance(lowerCAmelCase_ , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(lowerCAmelCase_ , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , lowerCAmelCase_ )
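
# A quick standalone check that the flatten helper's shape arithmetic behaves
# as expected: collapsing dims 1..2 of a (2, 3, 4, 5) tensor gives (2, 12, 5).
_demo = tf.zeros((2, 3, 4, 5))
_in_shape = tf.shape(_demo)
_flat_dim = tf.math.reduce_prod(_in_shape[1:3])
_new_shape = tf.concat([_in_shape[:1], [_flat_dim], _in_shape[3:]], axis=0)
assert tuple(tf.reshape(_demo, _new_shape).shape) == (2, 12, 5)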
| 357 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : Any = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = """yolos"""
def __init__( self ,a_=768 ,a_=12 ,a_=12 ,a_=3_072 ,a_="gelu" ,a_=0.0 ,a_=0.0 ,a_=0.02 ,a_=1E-1_2 ,a_=[512, 864] ,a_=16 ,a_=3 ,a_=True ,a_=100 ,a_=True ,a_=False ,a_=1 ,a_=5 ,a_=2 ,a_=5 ,a_=2 ,a_=0.1 ,**a_ ,) -> List[str]:
super().__init__(**a_ )
_UpperCAmelCase : Optional[Any] = hidden_size
_UpperCAmelCase : Optional[Any] = num_hidden_layers
_UpperCAmelCase : Tuple = num_attention_heads
_UpperCAmelCase : Optional[Any] = intermediate_size
_UpperCAmelCase : Union[str, Any] = hidden_act
_UpperCAmelCase : List[str] = hidden_dropout_prob
_UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
_UpperCAmelCase : List[Any] = initializer_range
_UpperCAmelCase : Union[str, Any] = layer_norm_eps
_UpperCAmelCase : int = image_size
_UpperCAmelCase : Dict = patch_size
_UpperCAmelCase : Tuple = num_channels
_UpperCAmelCase : Optional[Any] = qkv_bias
_UpperCAmelCase : List[Any] = num_detection_tokens
_UpperCAmelCase : Tuple = use_mid_position_embeddings
_UpperCAmelCase : int = auxiliary_loss
# Hungarian matcher
_UpperCAmelCase : Dict = class_cost
_UpperCAmelCase : Dict = bbox_cost
_UpperCAmelCase : Optional[int] = giou_cost
# Loss coefficients
_UpperCAmelCase : int = bbox_loss_coefficient
_UpperCAmelCase : Optional[Any] = giou_loss_coefficient
_UpperCAmelCase : Union[str, Any] = eos_coefficient
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = version.parse("""1.11""" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _snake_case ( self ) -> float:
return 1E-4
@property
def _snake_case ( self ) -> int:
return 12
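
# Usage sketch against the released API (assumes a transformers build that
# ships YOLOS; `YolosConfig` is the upstream name for this configuration):
from transformers import YolosConfig

_cfg = YolosConfig(num_detection_tokens=100)
assert _cfg.model_type == "yolos"
assert _cfg.num_detection_tokens == 100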
| 349 | 0 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
A_ : List[Any] = 0
A_ : Tuple = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0], # 0's are free cells whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
A_ : str = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
A_ : Dict = tuple[int, int]
class lowercase :
"""simple docstring"""
def __init__( self ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,) -> Optional[Any]:
_UpperCAmelCase : Dict = pos_x
_UpperCAmelCase : List[Any] = pos_y
_UpperCAmelCase : Dict = (pos_y, pos_x)
_UpperCAmelCase : List[str] = goal_x
_UpperCAmelCase : Union[str, Any] = goal_y
_UpperCAmelCase : str = g_cost
_UpperCAmelCase : List[str] = parent
_UpperCAmelCase : Dict = self.calculate_heuristic()
_UpperCAmelCase : Union[str, Any] = self.g_cost + self.h_cost
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : Optional[Any] = self.pos_x - self.goal_x
_UpperCAmelCase : int = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowercase__ ) + abs(lowercase__ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self ,a_ ) -> Any:
return self.f_cost < other.f_cost
class lowercase :
"""simple docstring"""
def __init__( self ,a_ ,a_ ) -> Any:
_UpperCAmelCase : Optional[Any] = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,0 ,lowercase__ )
_UpperCAmelCase : Optional[Any] = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,99_999 ,lowercase__ )
_UpperCAmelCase : List[str] = [self.start]
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : Optional[int] = False
def _snake_case ( self ) -> Tuple:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
_UpperCAmelCase : List[Any] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowercase__ )
self.closed_nodes.append(lowercase__ )
_UpperCAmelCase : List[str] = self.get_successors(lowercase__ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowercase__ )
else:
# retrieve the best current path
_UpperCAmelCase : Union[str, Any] = self.open_nodes.pop(self.open_nodes.index(lowercase__ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowercase__ )
else:
self.open_nodes.append(lowercase__ )
return [self.start.pos]
def _snake_case ( self ,a_ ) -> str:
_UpperCAmelCase : List[Any] = []
for action in delta:
_UpperCAmelCase : Tuple = parent.pos_x + action[1]
_UpperCAmelCase : Tuple = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowercase__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowercase__ ,lowercase__ ,self.target.pos_y ,self.target.pos_x ,parent.g_cost + 1 ,lowercase__ ,) )
return successors
def _snake_case ( self ,a_ ) -> Optional[int]:
_UpperCAmelCase : List[str] = node
_UpperCAmelCase : int = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_UpperCAmelCase : Dict = current_node.parent
path.reverse()
return path
class lowercase :
"""simple docstring"""
def __init__( self ,a_ ,a_ ) -> Tuple:
_UpperCAmelCase : Optional[int] = AStar(lowercase__ ,lowercase__ )
_UpperCAmelCase : Optional[Any] = AStar(lowercase__ ,lowercase__ )
_UpperCAmelCase : List[Any] = False
def _snake_case ( self ) -> str:
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
_UpperCAmelCase : Any = self.fwd_astar.open_nodes.pop(0 )
_UpperCAmelCase : Any = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowercase__ ,lowercase__ )
self.fwd_astar.closed_nodes.append(lowercase__ )
self.bwd_astar.closed_nodes.append(lowercase__ )
_UpperCAmelCase : List[str] = current_bwd_node
_UpperCAmelCase : Union[str, Any] = current_fwd_node
_UpperCAmelCase : Dict = {
self.fwd_astar: self.fwd_astar.get_successors(lowercase__ ),
self.bwd_astar: self.bwd_astar.get_successors(lowercase__ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowercase__ )
else:
# retrieve the best current path
_UpperCAmelCase : Dict = astar.open_nodes.pop(
astar.open_nodes.index(lowercase__ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowercase__ )
else:
astar.open_nodes.append(lowercase__ )
return [self.fwd_astar.start.pos]
def _snake_case ( self ,a_ ,a_ ) -> Tuple:
_UpperCAmelCase : Optional[Any] = self.fwd_astar.retrace_path(lowercase__ )
_UpperCAmelCase : Dict = self.bwd_astar.retrace_path(lowercase__ )
bwd_path.pop()
bwd_path.reverse()
_UpperCAmelCase : List[Any] = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
A_ : List[Any] = (0, 0)
A_ : Tuple = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
A_ : str = time.time()
A_ : Union[str, Any] = AStar(init, goal)
A_ : Tuple = a_star.search()
A_ : Dict = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
A_ : Optional[Any] = time.time()
    A_ : str = BidirectionalAStar(init, goal)
    A_ : Tuple = bd_a_star.search()
    A_ : str = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 358 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : Any = [10, 20, 30, 40, 50, 60]
_UpperCAmelCase : Dict = [2, 4, 6, 8, 10, 12]
_UpperCAmelCase : Optional[int] = 100
self.assertEqual(kp.calc_profit(a_ ,a_ ,a_ ) ,210 )
def _snake_case ( self ) -> Union[str, Any]:
self.assertRaisesRegex(a_ ,"""max_weight must greater than zero.""" )
def _snake_case ( self ) -> Any:
self.assertRaisesRegex(a_ ,"""Weight can not be negative.""" )
def _snake_case ( self ) -> Optional[Any]:
self.assertRaisesRegex(a_ ,"""Profit can not be negative.""" )
def _snake_case ( self ) -> Dict:
self.assertRaisesRegex(a_ ,"""max_weight must greater than zero.""" )
def _snake_case ( self ) -> Tuple:
self.assertRaisesRegex(
a_ ,"""The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
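
# A minimal greedy fractional-knapsack sketch consistent with the expected
# value in the first test (210 for capacity 100). Illustrative only -- it
# assumes items are divisible and taken in profit/weight order, which is the
# usual greedy_knapsack contract:
def calc_profit_sketch(profit, weight, max_weight):
    order = sorted(range(len(profit)), key=lambda i: profit[i] / weight[i], reverse=True)
    total, capacity = 0.0, max_weight
    for i in order:
        take = min(weight[i], capacity)  # whole item, or the remaining fraction
        total += profit[i] * take / weight[i]
        capacity -= take
        if capacity == 0:
            break
    return total

assert calc_profit_sketch([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210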
| 349 | 0 |
'''simple docstring'''
def snake_case_ ( lowerCAmelCase_ )-> bool:
'''simple docstring'''
if number < 0:
raise ValueError("""number must not be negative""" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
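
# The bit trick at a glance: n & (n - 1) clears the lowest set bit, so it is
# zero exactly for powers of two. (Note the check above also returns True for
# 0, a quirk of this formulation.)
assert [n for n in range(1, 20) if n & (n - 1) == 0] == [1, 2, 4, 8, 16]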
| 359 |
'''simple docstring'''
from __future__ import annotations
import math
def snake_case_ ( lowerCAmelCase_ )-> list[int]:
'''simple docstring'''
if num <= 0:
_UpperCAmelCase : List[Any] = F'''{num}: Invalid input, please enter a positive integer.'''
raise ValueError(lowerCAmelCase_ )
_UpperCAmelCase : List[Any] = [True] * (num + 1)
_UpperCAmelCase : int = []
_UpperCAmelCase : int = 2
_UpperCAmelCase : int = int(math.sqrt(lowerCAmelCase_ ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(lowerCAmelCase_ )
# Set multiples of start be False
for i in range(start * start , num + 1 , lowerCAmelCase_ ):
if sieve[i] is True:
_UpperCAmelCase : Tuple = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(lowerCAmelCase_ )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
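
# Quick sanity checks (assuming the sieve is exposed as `prime_sieve`, the
# name the driver above expects):
assert prime_sieve(10) == [2, 3, 5, 7]
assert prime_sieve(2) == [2]
assert prime_sieve(1) == []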
| 349 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
A_ : Optional[int] = logging.get_logger(__name__)
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = UniSpeechSatForSequenceClassification.from_pretrained(a__ , config=a__ )
_UpperCAmelCase : Tuple = downstream_dict["""projector.weight"""]
_UpperCAmelCase : str = downstream_dict["""projector.bias"""]
_UpperCAmelCase : List[str] = downstream_dict["""model.post_net.linear.weight"""]
_UpperCAmelCase : Dict = downstream_dict["""model.post_net.linear.bias"""]
return model
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Any:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = UniSpeechSatForAudioFrameClassification.from_pretrained(a__ , config=a__ )
_UpperCAmelCase : List[Any] = downstream_dict["""model.linear.weight"""]
_UpperCAmelCase : str = downstream_dict["""model.linear.bias"""]
return model
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> int:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = UniSpeechSatForXVector.from_pretrained(a__ , config=a__ )
_UpperCAmelCase : int = downstream_dict["""connector.weight"""]
_UpperCAmelCase : Optional[Any] = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
_UpperCAmelCase : Tuple = downstream_dict[
F'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
_UpperCAmelCase : str = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
_UpperCAmelCase : Optional[int] = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
_UpperCAmelCase : Optional[int] = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
_UpperCAmelCase : str = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
_UpperCAmelCase : int = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
_UpperCAmelCase : Tuple = downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> List[Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] = torch.load(a__ , map_location="""cpu""" )
_UpperCAmelCase : Optional[int] = checkpoint["""Downstream"""]
_UpperCAmelCase : Optional[int] = UniSpeechSatConfig.from_pretrained(a__ )
_UpperCAmelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(
a__ , return_attention_mask=a__ , do_normalize=a__ )
_UpperCAmelCase : str = hf_config.architectures[0]
if arch.endswith("""ForSequenceClassification""" ):
_UpperCAmelCase : Any = convert_classification(a__ , a__ , a__ )
elif arch.endswith("""ForAudioFrameClassification""" ):
_UpperCAmelCase : Optional[Any] = convert_diarization(a__ , a__ , a__ )
elif arch.endswith("""ForXVector""" ):
_UpperCAmelCase : Union[str, Any] = convert_xvector(a__ , a__ , a__ )
else:
raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
_UpperCAmelCase : int = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(a__ )
hf_model.save_pretrained(a__ )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
A_ : Optional[int] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
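
# A representative invocation of this script (all paths below are
# placeholders, not real checkpoints):
#
#   python convert_unispeech_sat.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted_model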
| 360 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self ,a_ ,a_ = None ,a_ = None ,a_ = True ,a_ = None ,a_ = False ,a_ = None ,a_ = True ,a_ = "arrow" ,**a_ ,) -> str:
super().__init__(
split=a_ ,features=a_ ,cache_dir=a_ ,keep_in_memory=a_ ,streaming=a_ ,**a_ ,)
_UpperCAmelCase : Any = load_from_cache_file
_UpperCAmelCase : Optional[int] = file_format
_UpperCAmelCase : int = Spark(
df=a_ ,features=a_ ,cache_dir=a_ ,working_dir=a_ ,**a_ ,)
def _snake_case ( self ) -> int:
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
_UpperCAmelCase : str = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=a_ ,file_format=self._file_format ,)
return self.builder.as_dataset(split=self.split )
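
# Hedged usage sketch (kept as comments since it needs pyspark and a live
# SparkSession; `Dataset.from_spark` is the public entry point that wraps
# this reader in recent `datasets` releases):
#
# from pyspark.sql import SparkSession
# from datasets import Dataset
#
# spark = SparkSession.builder.master("local[*]").getOrCreate()
# df = spark.createDataFrame([("a", 0), ("b", 1)], ["text", "label"])
# ds = Dataset.from_spark(df)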
| 349 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Tuple = logging.get_logger(__name__)
A_ : Tuple = {
"""vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class lowercase ( __lowercase ):
"""simple docstring"""
UpperCAmelCase = '''glpn'''
def __init__( self ,a_=3 ,a_=4 ,a_=[2, 2, 2, 2] ,a_=[8, 4, 2, 1] ,a_=[32, 64, 160, 256] ,a_=[7, 3, 3, 3] ,a_=[4, 2, 2, 2] ,a_=[1, 2, 5, 8] ,a_=[4, 4, 4, 4] ,a_="gelu" ,a_=0.0 ,a_=0.0 ,a_=0.02 ,a_=0.1 ,a_=1E-6 ,a_=64 ,a_=10 ,a_=-1 ,**a_ ,) -> Optional[int]:
super().__init__(**UpperCAmelCase__ )
_UpperCAmelCase : List[str] = num_channels
_UpperCAmelCase : int = num_encoder_blocks
_UpperCAmelCase : Tuple = depths
_UpperCAmelCase : List[str] = sr_ratios
_UpperCAmelCase : List[str] = hidden_sizes
_UpperCAmelCase : str = patch_sizes
_UpperCAmelCase : Optional[int] = strides
_UpperCAmelCase : Tuple = mlp_ratios
_UpperCAmelCase : str = num_attention_heads
_UpperCAmelCase : Optional[int] = hidden_act
_UpperCAmelCase : Optional[int] = hidden_dropout_prob
_UpperCAmelCase : str = attention_probs_dropout_prob
_UpperCAmelCase : Union[str, Any] = initializer_range
_UpperCAmelCase : Any = drop_path_rate
_UpperCAmelCase : Optional[Any] = layer_norm_eps
_UpperCAmelCase : Optional[int] = decoder_hidden_size
_UpperCAmelCase : List[str] = max_depth
_UpperCAmelCase : Any = head_in_index
| 361 |
'''simple docstring'''
A_ : Optional[Any] = """0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 349 | 0 |
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
A_ : int = logging.get_logger(__name__)
class lowercase ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase = ["""pixel_values"""]
def __init__( self ,a_ = True ,a_ = None ,a_ = PILImageResampling.BICUBIC ,a_ = True ,a_ = None ,a_ = True ,a_ = 1 / 255 ,a_ = True ,a_ = IMAGENET_DEFAULT_MEAN ,a_ = IMAGENET_DEFAULT_STD ,**a_ ,) -> None:
super().__init__(**_a )
_UpperCAmelCase : List[Any] = size if size is not None else {"""shortest_edge""": 224}
_UpperCAmelCase : int = get_size_dict(_a ,default_to_square=_a )
_UpperCAmelCase : Union[str, Any] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
_UpperCAmelCase : str = get_size_dict(_a ,param_name="""crop_size""" )
_UpperCAmelCase : Union[str, Any] = do_resize
_UpperCAmelCase : Union[str, Any] = size
_UpperCAmelCase : List[str] = resample
_UpperCAmelCase : int = do_center_crop
_UpperCAmelCase : str = crop_size
_UpperCAmelCase : str = do_rescale
_UpperCAmelCase : Any = rescale_factor
_UpperCAmelCase : List[str] = do_normalize
_UpperCAmelCase : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_UpperCAmelCase : Optional[int] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _snake_case ( self ,a_ ,a_ ,a_ = PILImageResampling.BICUBIC ,a_ = None ,**a_ ,) -> np.ndarray:
_UpperCAmelCase : str = get_size_dict(_a ,default_to_square=_a )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
_UpperCAmelCase : Any = int((256 / 224) * size["""shortest_edge"""] )
_UpperCAmelCase : Any = get_resize_output_image_size(_a ,size=_a ,default_to_square=_a )
_UpperCAmelCase : int = {"""height""": output_size[0], """width""": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' )
return resize(
_a ,size=(size_dict["""height"""], size_dict["""width"""]) ,resample=_a ,data_format=_a ,**_a )
def _snake_case ( self ,a_ ,a_ ,a_ = None ,**a_ ,) -> np.ndarray:
_UpperCAmelCase : Dict = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(_a ,size=(size["""height"""], size["""width"""]) ,data_format=_a ,**_a )
def _snake_case ( self ,a_ ,a_ ,a_ = None ,**a_ ,) -> np.ndarray:
return rescale(_a ,scale=_a ,data_format=_a ,**_a )
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ = None ,**a_ ,) -> np.ndarray:
return normalize(_a ,mean=_a ,std=_a ,data_format=_a ,**_a )
def _snake_case ( self ,a_ ,a_ = None ,a_ = None ,a_ = None ,a_ = None ,a_ = None ,a_ = None ,a_ = None ,a_ = None ,a_ = None ,a_ = None ,a_ = None ,a_ = ChannelDimension.FIRST ,**a_ ,) -> BatchFeature:
_UpperCAmelCase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase : List[str] = resample if resample is not None else self.resample
_UpperCAmelCase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase : Dict = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase : Tuple = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase : Any = image_std if image_std is not None else self.image_std
_UpperCAmelCase : List[str] = size if size is not None else self.size
_UpperCAmelCase : Union[str, Any] = get_size_dict(_a ,default_to_square=_a )
_UpperCAmelCase : Optional[int] = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase : str = get_size_dict(_a ,param_name="""crop_size""" )
_UpperCAmelCase : int = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_UpperCAmelCase : Dict = [to_numpy_array(_a ) for image in images]
if do_resize:
_UpperCAmelCase : Dict = [self.resize(_a ,_a ,_a ) for image in images]
if do_center_crop:
_UpperCAmelCase : Tuple = [self.center_crop(_a ,_a ) for image in images]
if do_rescale:
_UpperCAmelCase : int = [self.rescale(_a ,_a ) for image in images]
if do_normalize:
_UpperCAmelCase : Union[str, Any] = [self.normalize(_a ,_a ,_a ) for image in images]
_UpperCAmelCase : int = [to_channel_dimension_format(_a ,_a ) for image in images]
_UpperCAmelCase : Optional[int] = {"""pixel_values""": images}
return BatchFeature(data=_a ,tensor_type=_a )
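
# The rescale + normalize steps above, condensed into plain NumPy for a dummy
# channels-last image (the mean/std values are the IMAGENET_DEFAULT constants):
_demo_img = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
_mean = np.array([0.485, 0.456, 0.406])
_std = np.array([0.229, 0.224, 0.225])
_normalized = (_demo_img * (1 / 255) - _mean) / _std
assert _normalized.shape == (224, 224, 3)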
| 362 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def snake_case_ ( )-> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
_UpperCAmelCase : str = parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(lowerCAmelCase_ )
# Let's go
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
if not hasattr(lowerCAmelCase_ , """func""" ):
parser.print_help()
exit(1 )
# Run
_UpperCAmelCase : Optional[int] = args.func(lowerCAmelCase_ )
service.run()
if __name__ == "__main__":
main()
| 349 | 0 |
'''simple docstring'''
from __future__ import annotations
class lowercase :
"""simple docstring"""
def __init__( self ,a_ ,a_ ) -> str:
_UpperCAmelCase ,_UpperCAmelCase : List[str] = text, pattern
_UpperCAmelCase ,_UpperCAmelCase : List[str] = len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE )
def _snake_case ( self ,a_ ) -> int:
for i in range(self.patLen - 1 ,-1 ,-1 ):
if char == self.pattern[i]:
return i
return -1
def _snake_case ( self ,a_ ) -> Any:
for i in range(self.patLen - 1 ,-1 ,-1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def _snake_case ( self ) -> Tuple:
# searches pattern in text and returns index positions
_UpperCAmelCase : Optional[Any] = []
for i in range(self.textLen - self.patLen + 1 ):
_UpperCAmelCase : Tuple = self.mismatch_in_text(__SCREAMING_SNAKE_CASE )
if mismatch_index == -1:
positions.append(__SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase : str = self.match_in_pattern(self.text[mismatch_index] )
_UpperCAmelCase : Dict = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
A_ : int = 'ABAABA'
A_ : Optional[Any] = 'AB'
A_ : Any = BoyerMooreSearch(text, pattern)
A_ : Any = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
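
# One more quick run of the bad-character search (using the public names the
# driver above relies on):
assert BoyerMooreSearch("ABAABA", "ABA").bad_character_heuristic() == [0, 3]
assert BoyerMooreSearch("ABAABA", "XYZ").bad_character_heuristic() == []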
| 363 |
'''simple docstring'''
import math
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> int:
'''simple docstring'''
_UpperCAmelCase : str = len(lowerCAmelCase_ )
_UpperCAmelCase : List[str] = int(math.floor(math.sqrt(lowerCAmelCase_ ) ) )
_UpperCAmelCase : int = 0
while arr[min(lowerCAmelCase_ , lowerCAmelCase_ ) - 1] < x:
_UpperCAmelCase : Optional[int] = step
step += int(math.floor(math.sqrt(lowerCAmelCase_ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
_UpperCAmelCase : List[Any] = prev + 1
if prev == min(lowerCAmelCase_ , lowerCAmelCase_ ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
A_ : str = input("""Enter numbers separated by a comma:\n""").strip()
A_ : Union[str, Any] = [int(item) for item in user_input.split(""",""")]
A_ : int = int(input("""Enter the number to be searched:\n"""))
A_ : Any = jump_search(arr, x)
if res == -1:
print("""Number not found!""")
else:
print(f"""Number {x} is at index {res}""")
| 349 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowercase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = ShapEImgaImgPipeline
UpperCAmelCase = ["""image"""]
UpperCAmelCase = ["""image"""]
UpperCAmelCase = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
UpperCAmelCase = False
@property
def _snake_case ( self ) -> Any:
return 32
@property
def _snake_case ( self ) -> str:
return 32
@property
def _snake_case ( self ) -> List[str]:
return self.time_input_dim * 4
@property
def _snake_case ( self ) -> str:
return 8
@property
def _snake_case ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size ,image_size=64 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=1 ,)
_UpperCAmelCase : List[Any] = CLIPVisionModel(_SCREAMING_SNAKE_CASE )
return model
@property
def _snake_case ( self ) -> int:
_UpperCAmelCase : Union[str, Any] = CLIPImageProcessor(
crop_size=224 ,do_center_crop=_SCREAMING_SNAKE_CASE ,do_normalize=_SCREAMING_SNAKE_CASE ,do_resize=_SCREAMING_SNAKE_CASE ,image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] ,image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] ,resample=3 ,size=224 ,)
return image_processor
@property
def _snake_case ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
_UpperCAmelCase : Any = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"embedding_proj_norm_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
_UpperCAmelCase : int = PriorTransformer(**_SCREAMING_SNAKE_CASE )
return model
@property
def _snake_case ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
_UpperCAmelCase : Any = ShapERenderer(**_SCREAMING_SNAKE_CASE )
return model
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Dict = self.dummy_prior
_UpperCAmelCase : Dict = self.dummy_image_encoder
_UpperCAmelCase : Tuple = self.dummy_image_processor
_UpperCAmelCase : Any = self.dummy_renderer
_UpperCAmelCase : Tuple = HeunDiscreteScheduler(
beta_schedule="""exp""" ,num_train_timesteps=1_024 ,prediction_type="""sample""" ,use_karras_sigmas=_SCREAMING_SNAKE_CASE ,clip_sample=_SCREAMING_SNAKE_CASE ,clip_sample_range=1.0 ,)
_UpperCAmelCase : Tuple = {
"prior": prior,
"image_encoder": image_encoder,
"image_processor": image_processor,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def _snake_case ( self ,a_ ,a_=0 ) -> Dict:
_UpperCAmelCase : List[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
if str(_SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
_UpperCAmelCase : Tuple = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase : str = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Dict = {
"image": input_image,
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : int = "cpu"
_UpperCAmelCase : Tuple = self.get_dummy_components()
_UpperCAmelCase : int = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Union[str, Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase : List[str] = output.images[0]
_UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_UpperCAmelCase : int = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Any:
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _snake_case ( self ) -> int:
_UpperCAmelCase : int = torch_device == "cpu"
_UpperCAmelCase : List[Any] = True
self._test_inference_batch_single_identical(
batch_size=2 ,test_max_difference=_SCREAMING_SNAKE_CASE ,relax_max_difference=_SCREAMING_SNAKE_CASE ,)
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : List[Any] = self.get_dummy_components()
_UpperCAmelCase : Optional[int] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Tuple = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : str = 1
_UpperCAmelCase : Optional[int] = 2
_UpperCAmelCase : str = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
for key in inputs.keys():
if key in self.batch_params:
_UpperCAmelCase : Dict = batch_size * [inputs[key]]
_UpperCAmelCase : Optional[Any] = pipe(**_SCREAMING_SNAKE_CASE ,num_images_per_prompt=_SCREAMING_SNAKE_CASE )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
_UpperCAmelCase : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_img2img_out.npy""" )
_UpperCAmelCase : List[str] = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
_UpperCAmelCase : Dict = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : int = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
_UpperCAmelCase : Any = pipe(
_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE ,guidance_scale=3.0 ,num_inference_steps=64 ,frame_size=64 ,output_type="""np""" ,).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
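
# What assert_mean_pixel_difference boils down to, as a standalone sketch
# (illustrative only; the real helper compares uint8-scaled images against a
# fixed threshold):
def _mean_pixel_difference(a, b):
    return float(np.abs(a.astype(np.float32) - b.astype(np.float32)).mean())

assert _mean_pixel_difference(np.zeros((8, 8, 3)), np.zeros((8, 8, 3))) == 0.0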
| 364 |
'''simple docstring'''
import argparse
import copy
def snake_case_ ( lowerCAmelCase_ )-> Dict:
'''simple docstring'''
_UpperCAmelCase : Dict = {}
with open(lowerCAmelCase_ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
_UpperCAmelCase : Optional[int] = []
_list.append([line.split()[1], line.split()[2]] )
_UpperCAmelCase : List[str] = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
_UpperCAmelCase : List[str] = []
_list.append([line.split()[0], line.split()[2]] )
_UpperCAmelCase : Optional[int] = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> List[Any]:
'''simple docstring'''
with open(lowerCAmelCase_ ) as f:
_UpperCAmelCase : List[Any] = f.read(1 )
_UpperCAmelCase : int = start_node
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : Dict = start_node
_UpperCAmelCase : Any = 0
while visiting not in first_solution:
_UpperCAmelCase : Optional[int] = 10000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(lowerCAmelCase_ ) and k[0] not in first_solution:
_UpperCAmelCase : Optional[int] = k[1]
_UpperCAmelCase : List[str] = k[0]
first_solution.append(lowerCAmelCase_ )
_UpperCAmelCase : Optional[int] = distance_of_first_solution + int(lowerCAmelCase_ )
_UpperCAmelCase : Dict = best_node
first_solution.append(lowerCAmelCase_ )
_UpperCAmelCase : List[str] = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
_UpperCAmelCase : int = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10000
)
return first_solution, distance_of_first_solution
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> int:
'''simple docstring'''
_UpperCAmelCase : int = []
for n in solution[1:-1]:
_UpperCAmelCase : Tuple = solution.index(lowerCAmelCase_ )
for kn in solution[1:-1]:
_UpperCAmelCase : int = solution.index(lowerCAmelCase_ )
if n == kn:
continue
_UpperCAmelCase : Tuple = copy.deepcopy(lowerCAmelCase_ )
_UpperCAmelCase : Union[str, Any] = kn
_UpperCAmelCase : List[str] = n
_UpperCAmelCase : Optional[int] = 0
for k in _tmp[:-1]:
_UpperCAmelCase : List[str] = _tmp[_tmp.index(lowerCAmelCase_ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
_UpperCAmelCase : Dict = distance + int(i[1] )
_tmp.append(lowerCAmelCase_ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
_UpperCAmelCase : Dict = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda lowerCAmelCase_ : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> int:
'''simple docstring'''
_UpperCAmelCase : List[Any] = 1
_UpperCAmelCase : Optional[Any] = first_solution
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : List[Any] = distance_of_first_solution
_UpperCAmelCase : Dict = solution
while count <= iters:
_UpperCAmelCase : Any = find_neighborhood(lowerCAmelCase_ , lowerCAmelCase_ )
_UpperCAmelCase : Dict = 0
_UpperCAmelCase : Optional[Any] = neighborhood[index_of_best_solution]
_UpperCAmelCase : Optional[Any] = len(lowerCAmelCase_ ) - 1
_UpperCAmelCase : Optional[Any] = False
while not found:
_UpperCAmelCase : Tuple = 0
while i < len(lowerCAmelCase_ ):
if best_solution[i] != solution[i]:
_UpperCAmelCase : Any = best_solution[i]
_UpperCAmelCase : str = solution[i]
break
_UpperCAmelCase : int = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : List[Any] = best_solution[:-1]
_UpperCAmelCase : str = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
_UpperCAmelCase : Tuple = cost
_UpperCAmelCase : List[Any] = solution
else:
_UpperCAmelCase : Any = index_of_best_solution + 1
_UpperCAmelCase : Dict = neighborhood[index_of_best_solution]
if len(lowerCAmelCase_ ) >= size:
tabu_list.pop(0 )
_UpperCAmelCase : Optional[Any] = count + 1
return best_solution_ever, best_cost
def snake_case_ ( lowerCAmelCase_=None )-> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Tuple = generate_neighbours(args.File )
_UpperCAmelCase ,_UpperCAmelCase : Tuple = generate_first_solution(
args.File , lowerCAmelCase_ )
_UpperCAmelCase ,_UpperCAmelCase : str = tabu_search(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , args.Iterations , args.Size , )
print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
A_ : Optional[int] = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
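# A minimal, self-contained sketch of the greedy nearest-neighbour construction
# the script above uses for its initial tour. Helper names and the four-city
# instance are illustrative, not from this script; it assumes the same
# whitespace-separated "node_a node_b distance" edge-list format parsed above.
from collections import defaultdict


def greedy_first_tour(edges, start):
    neighbours = defaultdict(list)
    for node_a, node_b, dist in edges:
        neighbours[node_a].append((node_b, dist))
        neighbours[node_b].append((node_a, dist))
    tour, total, current = [start], 0, start
    while len(tour) < len(neighbours):
        # pick the cheapest not-yet-visited neighbour of the current node
        nxt, dist = min(
            ((n, d) for n, d in neighbours[current] if n not in tour),
            key=lambda nd: nd[1],
        )
        tour.append(nxt)
        total += dist
        current = nxt
    # close the tour by returning to the start node
    total += next(d for n, d in neighbours[current] if n == start)
    tour.append(start)
    return tour, total


if __name__ == "__main__":
    demo_edges = [
        ("a", "b", 20), ("a", "c", 18), ("a", "d", 22),
        ("b", "c", 10), ("b", "d", 11), ("c", "d", 12),
    ]
    print(greedy_first_tour(demo_edges, "a"))  # (['a', 'c', 'b', 'd', 'a'], 61)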
| 349 | 0 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
A_ : List[Any] = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json"""
with io.open(filename, """r""", encoding="""utf-8""") as f:
A_ : Union[str, Any] = json.load(f)
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ,a_ ) -> List[str]:
return FSMTTokenizer.from_pretrained(lowerCamelCase_ )
def _snake_case ( self ,a_ ) -> Dict:
_UpperCAmelCase : Tuple = FSMTForConditionalGeneration.from_pretrained(lowerCamelCase_ ).to(lowerCamelCase_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
def _snake_case ( self ,a_ ,a_ ) -> int:
# note: this test is not measuring peak performance since it only evaluates a small batch,
# but it should be enough to detect a regression in the output quality
_UpperCAmelCase : str = f'''facebook/wmt19-{pair}'''
_UpperCAmelCase : Tuple = self.get_tokenizer(lowerCamelCase_ )
_UpperCAmelCase : str = self.get_model(lowerCamelCase_ )
_UpperCAmelCase : Union[str, Any] = bleu_data[pair]["""src"""]
_UpperCAmelCase : Union[str, Any] = bleu_data[pair]["""tgt"""]
_UpperCAmelCase : Optional[Any] = tokenizer(lowerCamelCase_ ,return_tensors="""pt""" ,truncation=lowerCamelCase_ ,padding="""longest""" ).to(lowerCamelCase_ )
_UpperCAmelCase : Tuple = model.generate(
input_ids=batch.input_ids ,num_beams=8 ,)
_UpperCAmelCase : Optional[Any] = tokenizer.batch_decode(
lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ ,clean_up_tokenization_spaces=lowerCamelCase_ )
_UpperCAmelCase : Optional[int] = calculate_bleu(lowerCamelCase_ ,lowerCamelCase_ )
print(lowerCamelCase_ )
self.assertGreaterEqual(scores["""bleu"""] ,lowerCamelCase_ )
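# Standalone hedged sketch of the translate step exercised above, reusing the
# FSMT classes imported at the top of this file; the model ids follow the
# facebook/wmt19-{pair} pattern used in the test and the sample sentence is
# illustrative.
def translate_pair(pair, sentences):
    mname = f"facebook/wmt19-{pair}"
    tokenizer = FSMTTokenizer.from_pretrained(mname)
    model = FSMTForConditionalGeneration.from_pretrained(mname)
    batch = tokenizer(sentences, return_tensors="pt", padding="longest", truncation=True)
    outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)


if __name__ == "__main__":
    print(translate_pair("en-ru", ["Machine learning is great, isn't it?"]))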
| 365 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class lowercase :
"""simple docstring"""
UpperCAmelCase = 42
UpperCAmelCase = 42
class lowercase :
"""simple docstring"""
def __init__( self ,a_ ) -> List[str]:
_UpperCAmelCase : list[list[Edge]] = [[] for _ in range(a_ )]
_UpperCAmelCase : int = size
def __getitem__( self ,a_ ) -> Iterator[Edge]:
return iter(self._graph[vertex] )
@property
def _snake_case ( self ) -> List[Any]:
return self._size
def _snake_case ( self ,a_ ,a_ ,a_ ) -> Tuple:
if weight not in (0, 1):
raise ValueError("""Edge weight must be either 0 or 1.""" )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("""Vertex indexes must be in [0; size).""" )
self._graph[from_vertex].append(Edge(a_ ,a_ ) )
def _snake_case ( self ,a_ ,a_ ) -> int | None:
_UpperCAmelCase : Union[str, Any] = deque([start_vertex] )
_UpperCAmelCase : list[int | None] = [None] * self.size
_UpperCAmelCase : Union[str, Any] = 0
while queue:
_UpperCAmelCase : Union[str, Any] = queue.popleft()
_UpperCAmelCase : Union[str, Any] = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
_UpperCAmelCase : List[Any] = current_distance + edge.weight
_UpperCAmelCase : List[Any] = distances[edge.destination_vertex]
if (
isinstance(dest_vertex_distance ,int )
and new_distance >= dest_vertex_distance
):
continue
_UpperCAmelCase : Tuple = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("""No path from start_vertex to finish_vertex.""" )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
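# A compact functional restatement of the same 0-1 BFS for quick experiments;
# the graph, names, and list-based adjacency are illustrative, not part of the
# class above. deque is already imported at the top of this module.
def zero_one_bfs(adj, start, finish):
    inf = float("inf")
    dist = [inf] * len(adj)
    dist[start] = 0
    queue = deque([start])
    while queue:
        u = queue.popleft()
        for v, w in adj[u]:
            if dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
                # weight-0 edges relax to the front, weight-1 edges to the back
                if w == 0:
                    queue.appendleft(v)
                else:
                    queue.append(v)
    if dist[finish] == inf:
        raise ValueError("No path from start to finish.")
    return dist[finish]


if __name__ == "__main__":
    # 0 -(0)-> 1 -(1)-> 2 ties the direct 0 -(1)-> 2 edge; the distance is 1
    print(zero_one_bfs([[(1, 0), (2, 1)], [(2, 1)], []], 0, 2))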
| 349 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = (DPMSolverSinglestepScheduler,)
UpperCAmelCase = (("""num_inference_steps""", 25),)
def _snake_case ( self ,**a_ ) -> Dict:
_UpperCAmelCase : Union[str, Any] = {
'num_train_timesteps': 1_000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'prediction_type': 'epsilon',
'thresholding': False,
'sample_max_value': 1.0,
'algorithm_type': 'dpmsolver++',
'solver_type': 'midpoint',
'lambda_min_clipped': -float('inf' ),
'variance_type': None,
}
config.update(**_lowercase )
return config
def _snake_case ( self ,a_=0 ,**a_ ) -> List[Any]:
_UpperCAmelCase : Dict = dict(self.forward_default_kwargs )
_UpperCAmelCase : Optional[Any] = kwargs.pop('num_inference_steps' ,_lowercase )
_UpperCAmelCase : int = self.dummy_sample
_UpperCAmelCase : Optional[int] = 0.1 * sample
_UpperCAmelCase : int = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : str = self.get_scheduler_config(**_lowercase )
_UpperCAmelCase : List[str] = scheduler_class(**_lowercase )
scheduler.set_timesteps(_lowercase )
# copy over dummy past residuals
_UpperCAmelCase : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase )
_UpperCAmelCase : Optional[int] = scheduler_class.from_pretrained(_lowercase )
new_scheduler.set_timesteps(_lowercase )
# copy over dummy past residuals
_UpperCAmelCase : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase ,_UpperCAmelCase : int = sample, sample
for t in range(_lowercase ,time_step + scheduler.config.solver_order + 1 ):
_UpperCAmelCase : Union[str, Any] = scheduler.step(_lowercase ,_lowercase ,_lowercase ,**_lowercase ).prev_sample
_UpperCAmelCase : List[str] = new_scheduler.step(_lowercase ,_lowercase ,_lowercase ,**_lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self ) -> Union[str, Any]:
pass
def _snake_case ( self ,a_=0 ,**a_ ) -> Tuple:
_UpperCAmelCase : str = dict(self.forward_default_kwargs )
_UpperCAmelCase : str = kwargs.pop('num_inference_steps' ,_lowercase )
_UpperCAmelCase : Optional[Any] = self.dummy_sample
_UpperCAmelCase : List[Any] = 0.1 * sample
_UpperCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : int = self.get_scheduler_config()
_UpperCAmelCase : str = scheduler_class(**_lowercase )
scheduler.set_timesteps(_lowercase )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCAmelCase : Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase )
_UpperCAmelCase : Dict = scheduler_class.from_pretrained(_lowercase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCAmelCase : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase : Any = scheduler.step(_lowercase ,_lowercase ,_lowercase ,**_lowercase ).prev_sample
_UpperCAmelCase : List[str] = new_scheduler.step(_lowercase ,_lowercase ,_lowercase ,**_lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self ,a_=None ,**a_ ) -> List[str]:
if scheduler is None:
_UpperCAmelCase : Optional[Any] = self.scheduler_classes[0]
_UpperCAmelCase : Tuple = self.get_scheduler_config(**_lowercase )
_UpperCAmelCase : Optional[int] = scheduler_class(**_lowercase )
_UpperCAmelCase : Any = self.scheduler_classes[0]
_UpperCAmelCase : List[Any] = self.get_scheduler_config(**_lowercase )
_UpperCAmelCase : Dict = scheduler_class(**_lowercase )
_UpperCAmelCase : List[str] = 10
_UpperCAmelCase : Optional[Any] = self.dummy_model()
_UpperCAmelCase : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase : Any = model(_lowercase ,_lowercase )
_UpperCAmelCase : List[str] = scheduler.step(_lowercase ,_lowercase ,_lowercase ).prev_sample
return sample
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : Optional[int] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_UpperCAmelCase : str = 50
_UpperCAmelCase : Dict = self.dummy_model()
_UpperCAmelCase : str = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase )
# make sure that the first t is odd
for i, t in enumerate(scheduler.timesteps[3:] ):
_UpperCAmelCase : Dict = model(_lowercase ,_lowercase )
_UpperCAmelCase : str = scheduler.step(_lowercase ,_lowercase ,_lowercase ).prev_sample
_UpperCAmelCase : List[Any] = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def _snake_case ( self ) -> str:
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=_lowercase )
def _snake_case ( self ) -> Optional[Any]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_UpperCAmelCase : Tuple = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_UpperCAmelCase : str = self.full_loop(scheduler=_lowercase )
_UpperCAmelCase : Optional[Any] = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
_UpperCAmelCase : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase : Union[str, Any] = UniPCMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase : Optional[Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_UpperCAmelCase : List[Any] = self.full_loop(scheduler=_lowercase )
_UpperCAmelCase : Dict = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def _snake_case ( self ) -> Optional[int]:
self.check_over_configs(thresholding=_lowercase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_lowercase ,prediction_type=_lowercase ,sample_max_value=_lowercase ,algorithm_type='dpmsolver++' ,solver_order=_lowercase ,solver_type=_lowercase ,)
def _snake_case ( self ) -> Any:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase )
def _snake_case ( self ) -> Optional[int]:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_lowercase ,solver_type=_lowercase ,prediction_type=_lowercase ,algorithm_type=_lowercase ,)
_UpperCAmelCase : Optional[Any] = self.full_loop(
solver_order=_lowercase ,solver_type=_lowercase ,prediction_type=_lowercase ,algorithm_type=_lowercase ,)
assert not torch.isnan(_lowercase ).any(), "Samples have nan numbers"
def _snake_case ( self ) -> Tuple:
self.check_over_configs(lower_order_final=_lowercase )
self.check_over_configs(lower_order_final=_lowercase )
def _snake_case ( self ) -> int:
self.check_over_configs(lambda_min_clipped=-float('inf' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def _snake_case ( self ) -> Union[str, Any]:
self.check_over_configs(variance_type=_lowercase )
self.check_over_configs(variance_type='learned_range' )
def _snake_case ( self ) -> Union[str, Any]:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=_lowercase ,time_step=0 )
def _snake_case ( self ) -> Dict:
_UpperCAmelCase : List[Any] = self.full_loop()
_UpperCAmelCase : Tuple = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : Dict = self.full_loop(use_karras_sigmas=_lowercase )
_UpperCAmelCase : Optional[Any] = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def _snake_case ( self ) -> str:
_UpperCAmelCase : List[str] = self.full_loop(prediction_type='v_prediction' )
_UpperCAmelCase : int = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : int = self.full_loop(prediction_type='v_prediction' ,use_karras_sigmas=_lowercase )
_UpperCAmelCase : Union[str, Any] = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : int = self.scheduler_classes[0]
_UpperCAmelCase : Dict = self.get_scheduler_config(thresholding=_lowercase ,dynamic_thresholding_ratio=0 )
_UpperCAmelCase : Optional[Any] = scheduler_class(**_lowercase )
_UpperCAmelCase : int = 10
_UpperCAmelCase : int = self.dummy_model()
_UpperCAmelCase : Optional[int] = self.dummy_sample_deter.half()
scheduler.set_timesteps(_lowercase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase : Dict = model(_lowercase ,_lowercase )
_UpperCAmelCase : List[str] = scheduler.step(_lowercase ,_lowercase ,_lowercase ).prev_sample
assert sample.dtype == torch.floataa
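if __name__ == "__main__":
    # Hedged usage sketch: the from_config round-trips checked above are also
    # how schedulers are swapped on a pipeline in user code; the checkpoint id
    # here is illustrative and not part of these tests.
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
    pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config)
    print(type(pipe.scheduler).__name__)  # DPMSolverSinglestepScheduler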
| 366 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A_ : Any = 1_6
A_ : Union[str, Any] = 3_2
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 16 )-> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_UpperCAmelCase : str = DatasetDict(
{
"""train""": dataset["""train"""].select(lowerCAmelCase_ ),
"""validation""": dataset["""train"""].select(lowerCAmelCase_ ),
"""test""": dataset["""validation"""],
} )
def tokenize_function(lowerCAmelCase_ ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_UpperCAmelCase : Optional[int] = datasets.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels', which is the name the models of the
# transformers library expect for their labels
_UpperCAmelCase : List[Any] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowerCAmelCase_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_UpperCAmelCase : Tuple = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we pad to round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_UpperCAmelCase : List[str] = 16
elif accelerator.mixed_precision != "no":
_UpperCAmelCase : Any = 8
else:
_UpperCAmelCase : Dict = None
return tokenizer.pad(
lowerCAmelCase_ , padding="""longest""" , max_length=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_tensors="""pt""" , )
# Instantiate dataloaders.
_UpperCAmelCase : Union[str, Any] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
_UpperCAmelCase : Union[str, Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
_UpperCAmelCase : Dict = DataLoader(
tokenized_datasets["""test"""] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
return train_dataloader, eval_dataloader, test_dataloader
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = []
# Download the dataset
_UpperCAmelCase : Dict = load_dataset("""glue""" , """mrpc""" )
# Create our splits
_UpperCAmelCase : Optional[Any] = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
_UpperCAmelCase : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase : Dict = config["""lr"""]
_UpperCAmelCase : List[Any] = int(config["""num_epochs"""] )
_UpperCAmelCase : str = int(config["""seed"""] )
_UpperCAmelCase : List[Any] = int(config["""batch_size"""] )
_UpperCAmelCase : int = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
_UpperCAmelCase : List[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_UpperCAmelCase : Dict = batch_size // MAX_GPU_BATCH_SIZE
_UpperCAmelCase : Tuple = MAX_GPU_BATCH_SIZE
set_seed(lowerCAmelCase_ )
# New Code #
# Create our folds:
_UpperCAmelCase : Any = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] )
_UpperCAmelCase : Tuple = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(lowerCAmelCase_ ):
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Union[str, Any] = get_fold_dataloaders(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )
# Instantiate the model (we build the model here so that the seed also controls new weight initialization)
_UpperCAmelCase : Tuple = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCAmelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to come before the optimizer
# creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_UpperCAmelCase : List[Any] = model.to(accelerator.device )
# Instantiate optimizer
_UpperCAmelCase : int = AdamW(params=model.parameters() , lr=lowerCAmelCase_ )
# Instantiate scheduler
_UpperCAmelCase : Dict = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase_ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Any = accelerator.prepare(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Now we train the model
for epoch in range(lowerCAmelCase_ ):
model.train()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_UpperCAmelCase : Union[str, Any] = model(**lowerCAmelCase_ )
_UpperCAmelCase : Dict = outputs.loss
_UpperCAmelCase : int = loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCAmelCase : List[str] = model(**lowerCAmelCase_ )
_UpperCAmelCase : List[Any] = outputs.logits.argmax(dim=-1 )
_UpperCAmelCase ,_UpperCAmelCase : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowerCAmelCase_ , references=lowerCAmelCase_ , )
_UpperCAmelCase : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , lowerCAmelCase_ )
# New Code #
# We also run predictions on the test set at the very end
_UpperCAmelCase : Tuple = []
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**lowerCAmelCase_ )
_UpperCAmelCase : Any = outputs.logits
_UpperCAmelCase ,_UpperCAmelCase : List[Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(lowerCAmelCase_ , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
_UpperCAmelCase : List[Any] = torch.cat(lowerCAmelCase_ , dim=0 )
_UpperCAmelCase : Union[str, Any] = torch.stack(lowerCAmelCase_ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
_UpperCAmelCase : List[str] = metric.compute(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )
accelerator.print("""Average test metrics from all folds:""" , lowerCAmelCase_ )
def snake_case_ ( )-> Any:
'''simple docstring'''
_UpperCAmelCase : List[str] = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowerCAmelCase_ , default=lowerCAmelCase_ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
# New Code #
parser.add_argument("""--num_folds""" , type=lowerCAmelCase_ , default=3 , help="""The number of splits to perform across the dataset""" )
_UpperCAmelCase : Optional[int] = parser.parse_args()
_UpperCAmelCase : Tuple = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowerCAmelCase_ , lowerCAmelCase_ )
if __name__ == "__main__":
main()
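if __name__ == "__main__":
    # Standalone illustration of the soft-voting ensemble computed at the end
    # of the training function above: per-fold test logits are stacked,
    # averaged over the folds, then argmaxed (dummy tensors: 3 folds,
    # 8 examples, 2 classes).
    fold_logits = [torch.randn(8, 2) for _ in range(3)]
    ensembled = torch.stack(fold_logits, dim=0).sum(dim=0).div(3).argmax(dim=-1)
    print(ensembled.shape)  # torch.Size([8])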
| 349 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self ,a_ ,a_=13 ,a_=7 ,a_=True ,a_=True ,a_=True ,a_=True ,a_=99 ,a_=32 ,a_=5 ,a_=4 ,a_=37 ,a_="gelu" ,a_=0.1 ,a_=0.1 ,a_=512 ,a_=16 ,a_=2 ,a_=0.02 ,a_=4 ,) -> List[str]:
_UpperCAmelCase : Optional[int] = parent
_UpperCAmelCase : List[str] = batch_size
_UpperCAmelCase : Optional[int] = seq_length
_UpperCAmelCase : Any = is_training
_UpperCAmelCase : Tuple = use_attention_mask
_UpperCAmelCase : List[str] = use_token_type_ids
_UpperCAmelCase : Optional[int] = use_labels
_UpperCAmelCase : Tuple = vocab_size
_UpperCAmelCase : List[Any] = hidden_size
_UpperCAmelCase : str = num_hidden_layers
_UpperCAmelCase : int = num_attention_heads
_UpperCAmelCase : Dict = intermediate_size
_UpperCAmelCase : Union[str, Any] = hidden_act
_UpperCAmelCase : str = hidden_dropout_prob
_UpperCAmelCase : int = attention_probs_dropout_prob
_UpperCAmelCase : List[Any] = max_position_embeddings
_UpperCAmelCase : List[Any] = type_vocab_size
_UpperCAmelCase : List[Any] = type_sequence_label_size
_UpperCAmelCase : List[str] = initializer_range
_UpperCAmelCase : str = num_choices
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_UpperCAmelCase : Dict = None
if self.use_attention_mask:
_UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : Optional[Any] = DistilBertConfig(
vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,tie_weights_=_UpperCAmelCase ,)
return config, input_ids, attention_mask
def _snake_case ( self ) -> Dict:
_UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Dict = config_and_inputs
_UpperCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class lowercase ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Optional[int] = FlaxDistilBertModelTester(self )
@slow
def _snake_case ( self ) -> Optional[int]:
for model_class_name in self.all_model_classes:
_UpperCAmelCase : Union[str, Any] = model_class_name.from_pretrained("""distilbert-base-uncased""" )
_UpperCAmelCase : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCAmelCase )
@require_flax
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : List[Any] = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
_UpperCAmelCase : Union[str, Any] = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
_UpperCAmelCase : Optional[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_UpperCAmelCase : Optional[int] = model(_UpperCAmelCase ,attention_mask=_UpperCAmelCase )[0]
_UpperCAmelCase : List[Any] = (1, 11, 768)
self.assertEqual(output.shape ,_UpperCAmelCase )
_UpperCAmelCase : Dict = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] ,_UpperCAmelCase ,atol=1E-4 ) )
| 367 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
A_ : Dict = logging.getLogger(__name__)
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = """sequence-classification"""
def __init__( self ,a_ ) -> Dict:
if type(a_ ) == dict:
_UpperCAmelCase : Tuple = Namespace(**a_ )
_UpperCAmelCase : Optional[int] = glue_output_modes[hparams.task]
_UpperCAmelCase : Union[str, Any] = glue_tasks_num_labels[hparams.task]
super().__init__(a_ ,a_ ,self.mode )
def _snake_case ( self ,**a_ ) -> Optional[Any]:
return self.model(**a_ )
def _snake_case ( self ,a_ ,a_ ) -> Optional[Any]:
_UpperCAmelCase : Optional[Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_UpperCAmelCase : Any = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
_UpperCAmelCase : Any = self(**a_ )
_UpperCAmelCase : int = outputs[0]
_UpperCAmelCase : Any = self.trainer.lr_schedulers[0]["""scheduler"""]
_UpperCAmelCase : Any = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _snake_case ( self ) -> int:
_UpperCAmelCase : Optional[int] = self.hparams
_UpperCAmelCase : int = processors[args.task]()
_UpperCAmelCase : str = processor.get_labels()
for mode in ["train", "dev"]:
_UpperCAmelCase : Tuple = self._feature_file(a_ )
if os.path.exists(a_ ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" ,a_ )
else:
logger.info("""Creating features from dataset file at %s""" ,args.data_dir )
_UpperCAmelCase : List[Any] = (
processor.get_dev_examples(args.data_dir )
if mode == """dev"""
else processor.get_train_examples(args.data_dir )
)
_UpperCAmelCase : Union[str, Any] = convert_examples_to_features(
a_ ,self.tokenizer ,max_length=args.max_seq_length ,label_list=self.labels ,output_mode=args.glue_output_mode ,)
logger.info("""Saving features into cached file %s""" ,a_ )
torch.save(a_ ,a_ )
def _snake_case ( self ,a_ ,a_ ,a_ = False ) -> DataLoader:
_UpperCAmelCase : Union[str, Any] = """dev""" if mode == """test""" else mode
_UpperCAmelCase : Tuple = self._feature_file(a_ )
logger.info("""Loading features from cached file %s""" ,a_ )
_UpperCAmelCase : Union[str, Any] = torch.load(a_ )
_UpperCAmelCase : List[str] = torch.tensor([f.input_ids for f in features] ,dtype=torch.long )
_UpperCAmelCase : Tuple = torch.tensor([f.attention_mask for f in features] ,dtype=torch.long )
_UpperCAmelCase : str = torch.tensor([f.token_type_ids for f in features] ,dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_UpperCAmelCase : Optional[int] = torch.tensor([f.label for f in features] ,dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_UpperCAmelCase : str = torch.tensor([f.label for f in features] ,dtype=torch.float )
return DataLoader(
TensorDataset(a_ ,a_ ,a_ ,a_ ) ,batch_size=a_ ,shuffle=a_ ,)
def _snake_case ( self ,a_ ,a_ ) -> Any:
_UpperCAmelCase : Any = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_UpperCAmelCase : int = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
_UpperCAmelCase : List[str] = self(**a_ )
_UpperCAmelCase ,_UpperCAmelCase : Optional[int] = outputs[:2]
_UpperCAmelCase : List[str] = logits.detach().cpu().numpy()
_UpperCAmelCase : Union[str, Any] = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _snake_case ( self ,a_ ) -> tuple:
_UpperCAmelCase : Optional[int] = torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item()
_UpperCAmelCase : Any = np.concatenate([x["""pred"""] for x in outputs] ,axis=0 )
if self.hparams.glue_output_mode == "classification":
_UpperCAmelCase : int = np.argmax(a_ ,axis=1 )
elif self.hparams.glue_output_mode == "regression":
_UpperCAmelCase : Union[str, Any] = np.squeeze(a_ )
_UpperCAmelCase : str = np.concatenate([x["""target"""] for x in outputs] ,axis=0 )
_UpperCAmelCase : Tuple = [[] for _ in range(out_label_ids.shape[0] )]
_UpperCAmelCase : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
_UpperCAmelCase : Optional[int] = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task ,a_ ,a_ )}
_UpperCAmelCase : Dict = dict(results.items() )
_UpperCAmelCase : Any = results
return ret, preds_list, out_label_list
def _snake_case ( self ,a_ ) -> dict:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Dict = self._eval_end(a_ )
_UpperCAmelCase : List[Any] = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _snake_case ( self ,a_ ) -> dict:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : str = self._eval_end(a_ )
_UpperCAmelCase : List[Any] = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _snake_case ( a_ ,a_ ) -> Any:
BaseTransformer.add_model_specific_args(a_ ,a_ )
parser.add_argument(
"""--max_seq_length""" ,default=128 ,type=a_ ,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) ,)
parser.add_argument(
"""--task""" ,default="""""" ,type=a_ ,required=a_ ,help="""The GLUE task to run""" ,)
parser.add_argument(
"""--gpus""" ,default=0 ,type=a_ ,help="""The number of GPUs allocated for this, it is by default 0 meaning none""" ,)
parser.add_argument(
"""--overwrite_cache""" ,action="""store_true""" ,help="""Overwrite the cached training and evaluation sets""" )
return parser
def snake_case_ ( )-> Tuple:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
add_generic_args(lowerCAmelCase_ , os.getcwd() )
_UpperCAmelCase : Optional[int] = GLUETransformer.add_model_specific_args(lowerCAmelCase_ , os.getcwd() )
_UpperCAmelCase : Optional[int] = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_UpperCAmelCase : Optional[int] = os.path.join(
"""./results""" , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
_UpperCAmelCase : int = GLUETransformer(lowerCAmelCase_ )
_UpperCAmelCase : Any = generic_train(lowerCAmelCase_ , lowerCAmelCase_ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_UpperCAmelCase : int = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=lowerCAmelCase_ ) )
_UpperCAmelCase : int = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(lowerCAmelCase_ )
if __name__ == "__main__":
main()
| 349 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ : Any = logging.get_logger(__name__)
A_ : Optional[int] = {'''vocab_file''': '''sentencepiece.bpe.model'''}
A_ : str = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
}
A_ : List[Any] = {
'''moussaKam/mbarthez''': 1_0_2_4,
'''moussaKam/barthez''': 1_0_2_4,
'''moussaKam/barthez-orangesum-title''': 1_0_2_4,
}
A_ : List[str] = '''▁'''
class lowercase ( _a ):
"""simple docstring"""
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ["""input_ids""", """attention_mask"""]
def __init__( self ,a_ ,a_="<s>" ,a_="</s>" ,a_="</s>" ,a_="<s>" ,a_="<unk>" ,a_="<pad>" ,a_="<mask>" ,a_ = None ,**a_ ,) -> Union[str, Any]:
# Mask token behaves like a normal word, i.e. includes the space before it
_UpperCAmelCase : int = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else mask_token
_UpperCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCamelCase ,eos_token=__lowerCamelCase ,unk_token=__lowerCamelCase ,sep_token=__lowerCamelCase ,cls_token=__lowerCamelCase ,pad_token=__lowerCamelCase ,mask_token=__lowerCamelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__lowerCamelCase ,)
_UpperCAmelCase : Union[str, Any] = vocab_file
_UpperCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCamelCase ) )
_UpperCAmelCase : Optional[int] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
_UpperCAmelCase : Tuple = len(self.sp_model ) - 1
_UpperCAmelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def _snake_case ( self ,a_ ,a_ = None ) -> Union[str, Any]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase : Any = [self.cls_token_id]
_UpperCAmelCase : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self ,a_ ,a_ = None ,a_ = False ) -> List[Any]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase ,token_ids_a=__lowerCamelCase ,already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def _snake_case ( self ,a_ ,a_ = None ) -> Dict:
_UpperCAmelCase : Any = [self.sep_token_id]
_UpperCAmelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _snake_case ( self ) -> str:
return len(self.sp_model )
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Optional[int] = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self ,a_ ) -> Any:
return self.sp_model.encode(__lowerCamelCase ,out_type=__lowerCamelCase )
def _snake_case ( self ,a_ ) -> List[str]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase : List[Any] = self.sp_model.PieceToId(__lowerCamelCase )
return spm_id if spm_id else self.unk_token_id
def _snake_case ( self ,a_ ) -> Dict:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__lowerCamelCase )
def _snake_case ( self ,a_ ) -> List[str]:
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : str = """"""
_UpperCAmelCase : Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCamelCase ) + token
_UpperCAmelCase : List[str] = True
_UpperCAmelCase : Dict = []
else:
current_sub_tokens.append(__lowerCamelCase )
_UpperCAmelCase : Optional[Any] = False
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def __getstate__( self ) -> List[Any]:
_UpperCAmelCase : Tuple = self.__dict__.copy()
_UpperCAmelCase : str = None
return state
def __setstate__( self ,a_ ) -> Optional[int]:
_UpperCAmelCase : Any = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
_UpperCAmelCase : Any = {}
_UpperCAmelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _snake_case ( self ,a_ ,a_ = None ) -> Optional[Any]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase : Union[str, Any] = os.path.join(
__lowerCamelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,__lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase ,"""wb""" ) as fi:
_UpperCAmelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
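if __name__ == "__main__":
    # Hedged usage sketch: this module backs transformers' BarthezTokenizer;
    # the checkpoint id is one of those listed in the maps above and the French
    # sample sentence is illustrative.
    from transformers import BarthezTokenizer

    tok = BarthezTokenizer.from_pretrained("moussaKam/barthez")
    ids = tok("Le camembert est délicieux.")["input_ids"]
    print(tok.convert_ids_to_tokens(ids))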
| 368 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : Union[str, Any] = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = """roformer"""
def __init__( self ,a_=50_000 ,a_=None ,a_=768 ,a_=12 ,a_=12 ,a_=3_072 ,a_="gelu" ,a_=0.1 ,a_=0.1 ,a_=1_536 ,a_=2 ,a_=0.02 ,a_=1E-1_2 ,a_=0 ,a_=False ,a_=True ,**a_ ,) -> Tuple:
super().__init__(pad_token_id=a_ ,**a_ )
_UpperCAmelCase : List[Any] = vocab_size
_UpperCAmelCase : str = hidden_size if embedding_size is None else embedding_size
_UpperCAmelCase : List[Any] = hidden_size
_UpperCAmelCase : str = num_hidden_layers
_UpperCAmelCase : Optional[Any] = num_attention_heads
_UpperCAmelCase : Optional[Any] = hidden_act
_UpperCAmelCase : str = intermediate_size
_UpperCAmelCase : Optional[Any] = hidden_dropout_prob
_UpperCAmelCase : Any = attention_probs_dropout_prob
_UpperCAmelCase : Optional[int] = max_position_embeddings
_UpperCAmelCase : Any = type_vocab_size
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : Dict = layer_norm_eps
_UpperCAmelCase : Optional[int] = rotary_value
_UpperCAmelCase : Any = use_cache
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCAmelCase : Optional[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_UpperCAmelCase : List[Any] = {0: """batch""", 1: """sequence"""}
_UpperCAmelCase : Tuple = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
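if __name__ == "__main__":
    # Quick hedged sketch using the released class this module corresponds to
    # in transformers; the printed values are the defaults set in __init__
    # above (50000 and 1536).
    from transformers import RoFormerConfig

    config = RoFormerConfig()
    print(config.vocab_size, config.max_position_embeddings)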
| 349 | 0 |
'''simple docstring'''
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 0 , lowerCAmelCase_ = 0 )-> int:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = right or len(a__ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(a__ , a__ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
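# A named restatement of the routine above for a quick demo: the search walks
# inward from both ends, one index per recursive call, and returns -1 when the
# pointers cross. Passing right=0 falls back to the last index, so a genuine
# right bound of 0 cannot be expressed; the same caveat applies above.
def two_end_search(data, key, left=0, right=0):
    right = right or len(data) - 1
    if left > right:
        return -1
    if data[left] == key:
        return left
    if data[right] == key:
        return right
    return two_end_search(data, key, left + 1, right - 1)


if __name__ == "__main__":
    print(two_end_search([1, 3, 5, 7, 9], 7))  # 3
    print(two_end_search([1, 3, 5, 7, 9], 4))  # -1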
| 369 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
@slow
@require_torch
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" ,"""prajjwal1/bert-tiny""" )
_UpperCAmelCase : List[Any] = BertTokenizer.from_pretrained("""bert-base-uncased""" )
_UpperCAmelCase : List[Any] = bertabert.config.encoder.vocab_size
_UpperCAmelCase : Optional[int] = tokenizer.sep_token_id
_UpperCAmelCase : Union[str, Any] = tokenizer.cls_token_id
_UpperCAmelCase : str = 128
_UpperCAmelCase : List[str] = datasets.load_dataset("""cnn_dailymail""" ,"""3.0.0""" ,split="""train[:1%]""" )
_UpperCAmelCase : Union[str, Any] = datasets.load_dataset("""cnn_dailymail""" ,"""3.0.0""" ,split="""validation[:1%]""" )
_UpperCAmelCase : Any = train_dataset.select(range(32 ) )
_UpperCAmelCase : Any = val_dataset.select(range(16 ) )
_UpperCAmelCase : List[Any] = 4
def _map_to_encoder_decoder_inputs(a_ ):
# Tokenizer will automatically set [CLS] <text> [SEP] for BERT inputs
_UpperCAmelCase : int = tokenizer(batch["""article"""] ,padding="""max_length""" ,truncation=a_ ,max_length=512 )
_UpperCAmelCase : Tuple = tokenizer(batch["""highlights"""] ,padding="""max_length""" ,truncation=a_ ,max_length=128 )
_UpperCAmelCase : int = inputs.input_ids
_UpperCAmelCase : Union[str, Any] = inputs.attention_mask
_UpperCAmelCase : Union[str, Any] = outputs.input_ids
_UpperCAmelCase : Dict = outputs.input_ids.copy()
_UpperCAmelCase : Dict = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
_UpperCAmelCase : Optional[int] = outputs.attention_mask
assert all(len(a_ ) == 512 for x in inputs.input_ids )
assert all(len(a_ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(a_ ):
_UpperCAmelCase : Optional[int] = pred.label_ids
_UpperCAmelCase : Optional[int] = pred.predictions
# all unnecessary tokens are removed
_UpperCAmelCase : Union[str, Any] = tokenizer.batch_decode(a_ ,skip_special_tokens=a_ )
_UpperCAmelCase : str = tokenizer.batch_decode(a_ ,skip_special_tokens=a_ )
_UpperCAmelCase : Tuple = sum([int(pred_str[i] == label_str[i] ) for i in range(len(a_ ) )] ) / len(a_ )
return {"accuracy": accuracy}
# map train dataset
_UpperCAmelCase : Union[str, Any] = train_dataset.map(
_map_to_encoder_decoder_inputs ,batched=a_ ,batch_size=a_ ,remove_columns=["""article""", """highlights"""] ,)
train_dataset.set_format(
type="""torch""" ,columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] ,)
# same for validation dataset
_UpperCAmelCase : List[str] = val_dataset.map(
_map_to_encoder_decoder_inputs ,batched=a_ ,batch_size=a_ ,remove_columns=["""article""", """highlights"""] ,)
val_dataset.set_format(
type="""torch""" ,columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] ,)
_UpperCAmelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_UpperCAmelCase : List[str] = SeqaSeqTrainingArguments(
output_dir=a_ ,per_device_train_batch_size=a_ ,per_device_eval_batch_size=a_ ,predict_with_generate=a_ ,evaluation_strategy="""steps""" ,do_train=a_ ,do_eval=a_ ,warmup_steps=0 ,eval_steps=2 ,logging_steps=2 ,)
# instantiate trainer
_UpperCAmelCase : int = SeqaSeqTrainer(
model=a_ ,args=a_ ,compute_metrics=_compute_metrics ,train_dataset=a_ ,eval_dataset=a_ ,tokenizer=a_ ,)
# start training
trainer.train()
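if __name__ == "__main__":
    # Hedged sketch of the warm-start pattern the test above relies on, using
    # the imports at the top of this file; the special-token wiring shown here
    # is one common choice and is not asserted by the test itself.
    model = EncoderDecoderModel.from_encoder_decoder_pretrained(
        "prajjwal1/bert-tiny", "prajjwal1/bert-tiny"
    )
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    model.config.decoder_start_token_id = tokenizer.cls_token_id
    model.config.eos_token_id = tokenizer.sep_token_id
    model.config.pad_token_id = tokenizer.pad_token_id
    print(model.config.decoder_start_token_id, model.config.pad_token_id)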
| 349 | 0 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
A_ : List[Any] = 3_0_0 # TEMPERATURE (unit = K)
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )-> float:
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
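# Worked example with hypothetical silicon-like concentrations (all in cm^-3):
# N_d = 1e17, N_a = 1e16, n_i = 1.5e10 gives
# V_bi = (kT/q) * ln(N_d * N_a / n_i^2), about 0.753 V at T = 300 K.
if __name__ == "__main__":
    kt_over_q = Boltzmann * 300 / physical_constants["electron volt"][0]
    print(kt_over_q * log((1e17 * 1e16) / (1.5e10) ** 2))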
| 370 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
A_ : List[Any] = 637_8137.0
A_ : Dict = 635_6752.31_4245
A_ : int = 6_3_7_8_1_3_7
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> float:
'''simple docstring'''
_UpperCAmelCase : Tuple = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
_UpperCAmelCase : Any = atan((1 - flattening) * tan(radians(lowerCAmelCase_ ) ) )
_UpperCAmelCase : Optional[Any] = atan((1 - flattening) * tan(radians(lowerCAmelCase_ ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
_UpperCAmelCase : Union[str, Any] = haversine_distance(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
_UpperCAmelCase : Optional[int] = (b_lata + b_lata) / 2
_UpperCAmelCase : Any = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2(P) * cos^2(Q) / cos^2(sigma/2)
_UpperCAmelCase : List[str] = (sin(lowerCAmelCase_ ) ** 2) * (cos(lowerCAmelCase_ ) ** 2)
_UpperCAmelCase : Union[str, Any] = cos(sigma / 2 ) ** 2
_UpperCAmelCase : Dict = (sigma - sin(lowerCAmelCase_ )) * (x_numerator / x_denominator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2(P) * sin^2(Q) / sin^2(sigma/2)
_UpperCAmelCase : Union[str, Any] = (cos(lowerCAmelCase_ ) ** 2) * (sin(lowerCAmelCase_ ) ** 2)
_UpperCAmelCase : Union[str, Any] = sin(sigma / 2 ) ** 2
_UpperCAmelCase : Optional[Any] = (sigma + sin(lowerCAmelCase_ )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
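# For reference, a self-contained haversine of the kind imported above; this
# sketch is illustrative, with the 6378137 m equatorial radius hard-coded and
# coordinates taken in degrees. The sample pair is San Francisco to New York,
# roughly 4.13e6 metres apart.
from math import asin, sqrt


def haversine(lat_a, lon_a, lat_b, lon_b):
    phi_a, phi_b = radians(lat_a), radians(lat_b)
    h = (
        sin((phi_b - phi_a) / 2) ** 2
        + cos(phi_a) * cos(phi_b) * sin(radians(lon_b - lon_a) / 2) ** 2
    )
    return 2 * 6378137.0 * asin(sqrt(h))


if __name__ == "__main__":
    print(haversine(37.774856, -122.424227, 40.713019, -74.012647))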
| 349 | 0 |
'''simple docstring'''
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")
    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")
def encrypt_message(key: int, message: str) -> str:
    """Columnar transposition: column `col` collects every key-th character."""
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)
def decrypt_message(key: int, message: str) -> str:
    """Invert encrypt_message by writing the ciphertext back into the grid."""
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 371 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    """Approximate the area between fnc and the x axis on [x_start, x_end]."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":
    def f(x: float) -> float:
        return x**3 + x**2
print("""f(x) = x^3 + x^2""")
print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
    i = 1_0
while i <= 1_0_0_0_0_0:
print(f"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 1_0
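    # As steps grow, the printed value approaches ~312.67, the geometric area of
    # |x^3 + x^2| on [-5, 5] (a figure derived here, not stated in the original).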
| 349 | 0 |
'''simple docstring'''
from __future__ import annotations
RADIX = 1_0
def radix_sort(list_of_ints: list[int]) -> list[int]:
    """LSD radix sort for non-negative integers (sorts in place, also returns)."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
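# Quick check (illustrative): radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
# returns [2, 24, 45, 66, 75, 90, 170, 802].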
| 350 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's own flags plus everything destined for the wrapped script."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
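# Typical invocation (illustrative; script name and flags are placeholders):
#     python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
# The wrapped script must expose an `_mp_fn(index)` entry point for xmp.spawn.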
| 349 | 0 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b) -> float:
    """Euclidean distance between two points given as coordinate sequences."""
    return np.linalg.norm(np.array(a) - np.array(b))
def classifier(x_train, y_train, classes, point, k=5) -> str:
    """Classify `point` by majority vote among its k nearest training samples."""
    data = zip(x_train, y_train)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
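    # For these small petal measurements the expected prediction is "setosa"
    # (an illustrative expectation, not asserted by the original script).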
| 351 |
'''simple docstring'''
def remove_digit(num: int) -> int:
    """Return the largest number obtainable by deleting exactly one digit of num."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
if __name__ == "__main__":
__import__("""doctest""").testmod()
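# Examples (illustrative): remove_digit(152) == 52 and remove_digit(1001) == 101.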
| 349 | 0 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""
def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name, count=200, max_id=oldest
        )
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
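    # Caveat (general API behavior, not specific to this script): Twitter's v1.1
    # user_timeline endpoint historically exposed only roughly the 3,200 most
    # recent tweets, so very long timelines come back truncated.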
| 352 |
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
1_2: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 1_1],
4: [0, 4, 8, 1_1],
6: [0, 2, 4, 7, 9, 1_1],
9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1],
1_2: list(range(1_2)),
},
1_6: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 1_5],
3: [0, 8, 1_5],
4: [0, 5, 1_0, 1_5],
6: [0, 3, 6, 9, 1_2, 1_5],
8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5],
9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5],
1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5],
1_6: list(range(1_6)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]},
1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_student))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e=None,
    d=None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()
    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})
    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)
    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.
    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy
    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d, teacher_d)
    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
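# Illustrative CLI usage via fire (model name, path, and layer counts are placeholders):
#     python make_student.py t5-base student_dir --e 6 --d 3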
| 349 | 0 |
'''simple docstring'''
from math import factorial
def solution(n: int = 20) -> int:
    """Count lattice paths through an n x n grid: the central binomial C(2n, n)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
A_ : int = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number.""")
| 353 |
'''simple docstring'''
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursive linear search from both ends; returns the index of key or -1."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
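# Example (illustrative): search([1, 2, 3, 4, 5], 4) returns 3, while searching
# for a missing key returns -1.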
| 349 | 0 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_2_8,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 5_0,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 1_0,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 1_0,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls) -> None:
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls) -> None:
try:
delete_repo(token=cls._token ,repo_id="""test-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""valid_org/test-config-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""test-dynamic-config""" )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)
        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )
        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)
        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})
        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")
        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP request to emulate the server being down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        repo = "hf-internal-testing/test-two-configs"
        import transformers as new_transformers
        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers
        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
self.assertEqual(old_configuration.hidden_size ,768 )
| 354 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
url = input("Enter Video/IGTV url: ").strip()
file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, """wb""") as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 349 | 0 |
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.314_4598
def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Root-mean-square speed of a gas molecule: vrms = sqrt(3RT/M)."""
if temperature < 0:
raise Exception("""Temperature cannot be less than 0 K""" )
if molar_mass <= 0:
raise Exception("""Molar mass cannot be less than or equal to 0 kg/mol""" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 3_0_0
    molar_mass = 2_8
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
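    # Unit caveat: the formula expects molar mass in kg/mol, so a physically
    # meaningful N2 input would be 0.028 (giving roughly 517 m/s); 28 follows
    # the original example.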
| 355 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet(self):
torch.manual_seed(0 )
        model = UNet2DConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
return model
@property
    def dummy_vae(self):
torch.manual_seed(0 )
        model = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
    def dummy_text_encoder(self):
torch.manual_seed(0 )
        config = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,)
        return CLIPTextModel(config)
@property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])
                def to(self, device):
                    self.pixel_values.to(device)
                    return self
            return Out()
return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images
        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4_003_660_346
        guidance_scale = 7
        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2_000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2_734_971_755
        guidance_scale = 7
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2_000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1_044_355_234
        guidance_scale = 12
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2_000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 349 | 0 |
'''simple docstring'''
from __future__ import annotations
class BoyerMooreSearch:
    """Boyer-Moore string search using the bad character heuristic."""
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)
    def match_in_pattern(self, char: str) -> int:
        """Rightmost index of char in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1
    def mismatch_in_text(self, current_pos: int) -> int:
        """Text position of the rightmost mismatch for this alignment, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1
    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
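# For this text/pattern pair the script prints [0, 3] (both alignments where
# "AB" occurs in "ABAABA").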
| 356 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
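# Lazy-import pattern: the structure below maps submodules to their exported
# names, and the _LazyModule at the bottom resolves them on first attribute
# access, keeping the package import itself cheap.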
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 349 | 0 |
'''simple docstring'''
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    if config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with"
            " `encoder_attention_type` attribute with a value from ['local', 'transient-global']."
        )
# Encoder
for layer_index in range(config.num_layers ):
        layer_name = f"layers_{str(layer_index)}"
# Self-Attention
        tax_attention_key = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        tax_attention_out = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        tax_attention_query = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        tax_attention_value = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            tax_global_layer_norm = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
        # Layer Normalization
        tax_attention_layer_norm = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            tax_mlp_wi_1 = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            tax_mlp_wi = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
        tax_mlp_wo = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
        # Layer Normalization
        tax_mlp_layer_norm = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = tax_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = tax_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = tax_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = tax_attention_value
        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = tax_attention_layer_norm
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"]["weight"] = tax_global_layer_norm
        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = tax_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = tax_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = tax_mlp_wi
        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = tax_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = tax_mlp_layer_norm
        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block
# Only for layer 0:
    tax_encoder_rel_embedding = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"]["embedding"] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        tax_encoder_global_rel_embedding = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"]["embedding"] = tax_encoder_global_rel_embedding
# Assigning
    tax_encoder_norm = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
        layer_name = f"layers_{str(layer_index)}"
# Self-Attention
        tax_attention_key = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        tax_attention_out = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        tax_attention_query = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        tax_attention_value = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
        # Layer Normalization
        tax_pre_attention_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]
        # Encoder-Decoder-Attention
        tax_enc_dec_attention_module = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        tax_enc_dec_attention_key = tax_enc_dec_attention_module["key"]["kernel"]
        tax_enc_dec_attention_out = tax_enc_dec_attention_module["out"]["kernel"]
        tax_enc_dec_attention_query = tax_enc_dec_attention_module["query"]["kernel"]
        tax_enc_dec_attention_value = tax_enc_dec_attention_module["value"]["kernel"]
        # Layer Normalization
        tax_cross_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
        # MLP
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            tax_mlp_wi_1 = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            tax_mlp_wi = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
        tax_mlp_wo = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
        # Layer Normalization
        tax_mlp_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = tax_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = tax_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = tax_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = tax_attention_value
        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = tax_pre_attention_layer_norm
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = tax_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = tax_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = tax_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = tax_enc_dec_attention_value
        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = tax_cross_layer_norm
        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = tax_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = tax_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = tax_mlp_wi
        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = tax_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = tax_mlp_layer_norm
        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block
# Decoder Normalization
    tax_decoder_norm = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = tax_decoder_norm
# Only for layer 0:
    tax_decoder_rel_embedding = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"]["embedding"] = tax_decoder_rel_embedding
# Token Embeddings
    tax_token_embeddings = tax_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
    flax_model.save_pretrained(flax_dump_folder_path)
print("""T5X Model was sucessfully converted!""" )
if __name__ == "__main__":
A_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint."""
)
parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""")
parser.add_argument(
"""--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model."""
)
A_ : Union[str, Any] = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 357 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class lowercase ( OnnxConfig ):
"""simple docstring"""
UpperCAmelCase = version.parse("""1.11""" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
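    # Absolute tolerance used when validating the exported ONNX model against the reference outputs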
@property
def _snake_case ( self ) -> float:
return 1E-4
@property
def _snake_case ( self ) -> int:
return 12
| 349 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
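# Standard lazy-import layout: module contents are declared up front and only imported on first attribute access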
A_ : Optional[int] = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    A_['''modeling_wavlm'''] = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], A_, module_spec=__spec__)
| 358 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Optional[Any]:
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit ,weight ,max_weight ) ,210 )
def _snake_case ( self ) -> Union[str, Any]:
self.assertRaisesRegex(a_ ,"""max_weight must greater than zero.""" )
def _snake_case ( self ) -> Any:
self.assertRaisesRegex(a_ ,"""Weight can not be negative.""" )
def _snake_case ( self ) -> Optional[Any]:
self.assertRaisesRegex(a_ ,"""Profit can not be negative.""" )
def _snake_case ( self ) -> Dict:
self.assertRaisesRegex(a_ ,"""max_weight must greater than zero.""" )
def _snake_case ( self ) -> Tuple:
        self.assertRaisesRegex(
            ValueError ,"""The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
| 349 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ : Tuple = logging.get_logger(__name__)
A_ : List[Any] = """▁"""
A_ : Tuple = {"""vocab_file""": """sentencepiece.bpe.model"""}
A_ : List[Any] = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
A_ : str = {
"""xlm-roberta-base""": 5_1_2,
"""xlm-roberta-large""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-english""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-german""": 5_1_2,
}
class lowercase ( PreTrainedTokenizer ):
"""simple docstring"""
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ["""input_ids""", """attention_mask"""]
def __init__( self ,a_ ,a_="<s>" ,a_="</s>" ,a_="</s>" ,a_="<s>" ,a_="<unk>" ,a_="<pad>" ,a_="<mask>" ,a_ = None ,**a_ ,) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_UpperCAmelCase : Dict = AddedToken(__A ,lstrip=__A ,rstrip=__A ) if isinstance(__A ,__A ) else mask_token
_UpperCAmelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__A ,eos_token=__A ,unk_token=__A ,sep_token=__A ,cls_token=__A ,pad_token=__A ,mask_token=__A ,sp_model_kwargs=self.sp_model_kwargs ,**__A ,)
_UpperCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__A ) )
_UpperCAmelCase : int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCAmelCase : Optional[int] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCAmelCase : List[str] = 1
_UpperCAmelCase : int = len(self.sp_model ) + self.fairseq_offset
_UpperCAmelCase : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> int:
_UpperCAmelCase : Any = self.__dict__.copy()
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self ,a_ ) -> Tuple:
_UpperCAmelCase : List[str] = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
_UpperCAmelCase : str = {}
_UpperCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _snake_case ( self ,a_ ,a_ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase : Tuple = [self.cls_token_id]
_UpperCAmelCase : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self ,a_ ,a_ = None ,a_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A ,token_ids_a=__A ,already_has_special_tokens=__A )
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]
def _snake_case ( self ,a_ ,a_ = None ) -> List[int]:
_UpperCAmelCase : List[Any] = [self.sep_token_id]
_UpperCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _snake_case ( self ) -> int:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def _snake_case ( self ) -> int:
_UpperCAmelCase : int = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self ,a_ ) -> List[str]:
return self.sp_model.encode(__A ,out_type=__A )
def _snake_case ( self ,a_ ) -> Optional[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase : str = self.sp_model.PieceToId(__A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _snake_case ( self ,a_ ) -> List[Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _snake_case ( self ,a_ ) -> List[Any]:
_UpperCAmelCase : str = """""".join(__A ).replace(__A ,""" """ ).strip()
return out_string
def _snake_case ( self ,a_ ,a_ = None ) -> Tuple[str]:
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase : Union[str, Any] = os.path.join(
__A ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,__A )
elif not os.path.isfile(self.vocab_file ):
with open(__A ,"""wb""" ) as fi:
_UpperCAmelCase : str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
| 359 |
'''simple docstring'''
from __future__ import annotations
import math
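# Sieve of Eratosthenes: prime_sieve(num) returns every prime <= num, e.g. prime_sieve(10) -> [2, 3, 5, 7]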
def prime_sieve( num )-> list[int]:
    '''simple docstring'''
    if num <= 0:
        msg = F'''{num}: Invalid input, please enter a positive integer.'''
        raise ValueError(msg )
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num ) )
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start )
            # Set multiples of start to False
            for i in range(start * start , num + 1 , start ):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1 , num + 1 ):
        if sieve[j] is True:
            prime.append(j )
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
| 349 | 0 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class lowercase ( DiffusionPipeline ):
"""simple docstring"""
UpperCAmelCase = ['vqvae']
def __init__( self ,a_ ,a_ ,a_ ,a_ ,) -> Any:
super().__init__()
self.register_modules(unet=_SCREAMING_SNAKE_CASE ,scheduler=_SCREAMING_SNAKE_CASE ,mel=_SCREAMING_SNAKE_CASE ,vqvae=_SCREAMING_SNAKE_CASE )
def _snake_case ( self ) -> int:
return 50 if isinstance(self.scheduler ,_SCREAMING_SNAKE_CASE ) else 1_000
@torch.no_grad()
    def __call__( self ,batch_size = 1 ,audio_file = None ,raw_audio = None ,slice = 0 ,start_step = 0 ,steps = None ,generator = None ,mask_start_secs = 0 ,mask_end_secs = 0 ,step_generator = None ,eta = 0 ,noise = None ,encoding = None ,return_dict=True ,) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
_UpperCAmelCase : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_UpperCAmelCase : Optional[int] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_UpperCAmelCase : List[str] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_SCREAMING_SNAKE_CASE ,device=self.device ,)
_UpperCAmelCase : List[str] = noise
_UpperCAmelCase : Tuple = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Union[str, Any] = self.mel.audio_slice_to_image(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : str = np.frombuffer(input_image.tobytes() ,dtype="""uint8""" ).reshape(
(input_image.height, input_image.width) )
_UpperCAmelCase : str = (input_image / 255) * 2 - 1
_UpperCAmelCase : List[str] = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_UpperCAmelCase : List[str] = self.vqvae.encode(torch.unsqueeze(_SCREAMING_SNAKE_CASE ,0 ) ).latent_dist.sample(
generator=_SCREAMING_SNAKE_CASE )[0]
_UpperCAmelCase : List[str] = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_UpperCAmelCase : int = self.scheduler.add_noise(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,self.scheduler.timesteps[start_step - 1] )
_UpperCAmelCase : Optional[int] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_UpperCAmelCase : Optional[int] = int(mask_start_secs * pixels_per_second )
_UpperCAmelCase : Tuple = int(mask_end_secs * pixels_per_second )
_UpperCAmelCase : Dict = self.scheduler.add_noise(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Tuple = self.unet(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )["""sample"""]
else:
_UpperCAmelCase : int = self.unet(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )["""sample"""]
if isinstance(self.scheduler ,_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : List[Any] = self.scheduler.step(
model_output=_SCREAMING_SNAKE_CASE ,timestep=_SCREAMING_SNAKE_CASE ,sample=_SCREAMING_SNAKE_CASE ,eta=_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE ,)["""prev_sample"""]
else:
_UpperCAmelCase : Optional[Any] = self.scheduler.step(
model_output=_SCREAMING_SNAKE_CASE ,timestep=_SCREAMING_SNAKE_CASE ,sample=_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE ,)["""prev_sample"""]
if mask is not None:
if mask_start > 0:
_UpperCAmelCase : Union[str, Any] = mask[:, step, :, :mask_start]
if mask_end > 0:
_UpperCAmelCase : int = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
            # 0.18215 was the scaling factor used in training to ensure unit variance
_UpperCAmelCase : List[Any] = 1 / self.vqvae.config.scaling_factor * images
_UpperCAmelCase : Dict = self.vqvae.decode(_SCREAMING_SNAKE_CASE )["""sample"""]
_UpperCAmelCase : Any = (images / 2 + 0.5).clamp(0 ,1 )
_UpperCAmelCase : Union[str, Any] = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
_UpperCAmelCase : int = (images * 255).round().astype("""uint8""" )
_UpperCAmelCase : Optional[Any] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ ,mode="""RGB""" ).convert("""L""" ) for _ in images) )
_UpperCAmelCase : List[Any] = [self.mel.image_to_audio(_SCREAMING_SNAKE_CASE ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_SCREAMING_SNAKE_CASE )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_SCREAMING_SNAKE_CASE ) )
@torch.no_grad()
    def _snake_case ( self ,images ,steps = 50 ) -> np.ndarray:
assert isinstance(self.scheduler ,_SCREAMING_SNAKE_CASE )
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : List[Any] = np.array(
[np.frombuffer(image.tobytes() ,dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
_UpperCAmelCase : str = (sample / 255) * 2 - 1
_UpperCAmelCase : int = torch.Tensor(_SCREAMING_SNAKE_CASE ).to(self.device )
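        # Reverse DDIM: step through the noise schedule backwards to map an image back to its latent noise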
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
_UpperCAmelCase : Any = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_UpperCAmelCase : Tuple = self.scheduler.alphas_cumprod[t]
_UpperCAmelCase : int = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_UpperCAmelCase : List[str] = 1 - alpha_prod_t
_UpperCAmelCase : Dict = self.unet(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )["""sample"""]
_UpperCAmelCase : str = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_UpperCAmelCase : List[str] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_UpperCAmelCase : Optional[Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
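    # Spherical linear interpolation (slerp) between two noise tensors; useful for smooth morphs between audio latents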
@staticmethod
    def _snake_case ( xa ,xb ,alpha ) -> torch.Tensor:
        theta = acos(torch.dot(torch.flatten(xa ) ,torch.flatten(xb ) ) / torch.norm(xa ) / torch.norm(xb ) )
        return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xb / sin(theta )
| 360 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowercase ( AbstractDatasetReader ):
"""simple docstring"""
    def __init__( self ,df ,split = None ,features = None ,streaming = True ,cache_dir = None ,keep_in_memory = False ,working_dir = None ,load_from_cache_file = True ,file_format = "arrow" ,**kwargs ,) -> str:
        super().__init__(
            split=split ,features=features ,cache_dir=cache_dir ,keep_in_memory=keep_in_memory ,streaming=streaming ,**kwargs ,)
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df ,features=features ,cache_dir=cache_dir ,working_dir=working_dir ,**kwargs ,)
    def _snake_case ( self ) -> int:
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode ,file_format=self._file_format ,)
        return self.builder.as_dataset(split=self.split )
| 349 | 0 |
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
    def _snake_case ( self ) -> Optional[Any]:
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
    def _snake_case ( self ) -> Dict:
        distributed_args = f'''\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n '''.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd ,env=os.environ.copy() )
| 361 |
'''simple docstring'''
A_ : Optional[Any] = """0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 349 | 0 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> str:
        act = get_activation("""swish""" )
        self.assertIsInstance(act ,nn.SiLU )
self.assertEqual(act(torch.tensor(-100 ,dtype=torch.floataa ) ).item() ,0 )
self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa ) ).item() ,20 )
def _snake_case ( self ) -> Optional[int]:
        act = get_activation("""silu""" )
        self.assertIsInstance(act ,nn.SiLU )
self.assertEqual(act(torch.tensor(-100 ,dtype=torch.floataa ) ).item() ,0 )
self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa ) ).item() ,20 )
def _snake_case ( self ) -> str:
        act = get_activation("""mish""" )
        self.assertIsInstance(act ,nn.Mish )
self.assertEqual(act(torch.tensor(-200 ,dtype=torch.floataa ) ).item() ,0 )
self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa ) ).item() ,20 )
def _snake_case ( self ) -> Union[str, Any]:
        act = get_activation("""gelu""" )
        self.assertIsInstance(act ,nn.GELU )
self.assertEqual(act(torch.tensor(-100 ,dtype=torch.floataa ) ).item() ,0 )
self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa ) ).item() ,20 )
| 362 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    '''simple docstring'''
    parser = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
    commands_parser = parser.add_subparsers(help="""diffusers-cli command helpers""" )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
| 349 | 0 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling( data )-> tuple:
'''simple docstring'''
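    # Split the sklearn Bunch into a (features, labels) pair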
return (data["data"], data["target"])
def xgboost( features , target )-> XGBClassifier:
    '''simple docstring'''
    classifier = XGBClassifier()
    classifier.fit(features , target )
    return classifier
def main( )-> None:
    '''simple docstring'''
    iris = load_iris()
    features, targets = data_handling(iris )
    x_train, x_test, y_train, y_test = train_test_split(
        features , targets , test_size=0.2_5 )
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train , y_train )
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier , x_test , y_test , display_labels=names , cmap="""Blues""" , normalize="""true""" , )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 363 |
'''simple docstring'''
import math
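# Jump search over a sorted array: probe in blocks of ~sqrt(n), then scan linearly inside the candidate block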
def jump_search( arr , x )-> int:
    '''simple docstring'''
    n = len(arr )
    step = int(math.floor(math.sqrt(n ) ) )
    prev = 0
    while arr[min(step , n ) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n ) ) )
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step , n ):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
    x = int(input("""Enter the number to be searched:\n"""))
    res = jump_search(arr, x)
if res == -1:
print("""Number not found!""")
else:
print(f"""Number {x} is at index {res}""")
| 349 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A_ : Optional[int] = logging.get_logger(__name__)
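# Pairwise squared distances via the identity ||a_i - b_j||^2 = ||a_i||^2 + ||b_j||^2 - 2 * a_i . b_j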
def squared_euclidean_distance( a , b ):
    '''simple docstring'''
    b = b.T
    aa = np.sum(np.square(a ) , axis=1 )
    ba = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
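# Map each RGB pixel to the index of its nearest colour-cluster centroid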
def color_quantize( x , clusters ):
    '''simple docstring'''
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
class lowercase ( BaseImageProcessor ):
"""simple docstring"""
UpperCAmelCase = ["pixel_values"]
    def __init__( self ,clusters = None ,do_resize = True ,size = None ,resample = PILImageResampling.BILINEAR ,do_normalize = True ,do_color_quantize = True ,**kwargs ,) -> Dict:
        super().__init__(**kwargs )
_UpperCAmelCase : Tuple = size if size is not None else {"""height""": 256, """width""": 256}
_UpperCAmelCase : Dict = get_size_dict(a__ )
_UpperCAmelCase : List[str] = np.array(a__ ) if clusters is not None else None
_UpperCAmelCase : Dict = do_resize
_UpperCAmelCase : str = size
_UpperCAmelCase : str = resample
_UpperCAmelCase : Any = do_normalize
_UpperCAmelCase : Dict = do_color_quantize
    def _snake_case ( self ,image ,size ,resample = PILImageResampling.BILINEAR ,data_format = None ,**kwargs ,) -> Optional[Any]:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
        return resize(
            image ,size=(size["""height"""], size["""width"""]) ,resample=resample ,data_format=data_format ,**kwargs )
    def _snake_case ( self ,image ,data_format = None ,) -> int:
        image = rescale(image=image ,scale=1 / 127.5 ,data_format=data_format )
        image = image - 1
        return image
    def _snake_case ( self ,images ,do_resize = None ,size = None ,resample = None ,do_normalize = None ,do_color_quantize = None ,clusters = None ,return_tensors = None ,data_format = ChannelDimension.FIRST ,**kwargs ,) -> List[str]:
_UpperCAmelCase : Tuple = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase : Union[str, Any] = size if size is not None else self.size
_UpperCAmelCase : List[str] = get_size_dict(a__ )
_UpperCAmelCase : Optional[Any] = resample if resample is not None else self.resample
_UpperCAmelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase : str = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
_UpperCAmelCase : Union[str, Any] = clusters if clusters is not None else self.clusters
_UpperCAmelCase : Tuple = np.array(a__ )
_UpperCAmelCase : Any = make_list_of_images(a__ )
if not valid_images(a__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_color_quantize and clusters is None:
raise ValueError("""Clusters must be specified if do_color_quantize is True.""" )
# All transformations expect numpy arrays.
_UpperCAmelCase : Dict = [to_numpy_array(a__ ) for image in images]
if do_resize:
_UpperCAmelCase : Tuple = [self.resize(image=a__ ,size=a__ ,resample=a__ ) for image in images]
if do_normalize:
_UpperCAmelCase : Dict = [self.normalize(image=a__ ) for image in images]
if do_color_quantize:
_UpperCAmelCase : Tuple = [to_channel_dimension_format(a__ ,ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
_UpperCAmelCase : Optional[Any] = np.array(a__ )
_UpperCAmelCase : Any = color_quantize(a__ ,a__ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
_UpperCAmelCase : Tuple = images.shape[0]
_UpperCAmelCase : Any = images.reshape(a__ ,-1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
_UpperCAmelCase : Optional[Any] = list(a__ )
else:
_UpperCAmelCase : Any = [to_channel_dimension_format(a__ ,a__ ) for image in images]
_UpperCAmelCase : Optional[int] = {"""input_ids""": images}
return BatchFeature(data=a__ ,tensor_type=a__ )
| 364 |
'''simple docstring'''
import argparse
import copy
def generate_neighbours( path ):
    '''simple docstring'''
    dict_of_neighbours = {}
    with open(path ) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]] )
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]] )
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
    return dict_of_neighbours
def generate_first_solution( path , dict_of_neighbours ):
    '''simple docstring'''
    with open(path ) as f:
        start_node = f.read(1 )
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node
    first_solution.append(end_node )
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 10000
    )
    return first_solution, distance_of_first_solution
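# Neighborhood = every tour reachable by swapping two interior nodes; each candidate carries its total distance as the final element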
def find_neighborhood( solution , dict_of_neighbours ):
    '''simple docstring'''
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n )
        for kn in solution[1:-1]:
            idx2 = solution.index(kn )
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution )
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            _tmp.append(distance )
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution
def tabu_search( first_solution , distance_of_first_solution , dict_of_neighbours , iters , size ):
    '''simple docstring'''
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution , dict_of_neighbours )
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution ) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution ):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node] )
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list ) >= size:
            tabu_list.pop(0 )
        count = count + 1
    return best_solution_ever, best_cost
def main( args=None ):
    '''simple docstring'''
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )
    best_sol, best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
    print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 349 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
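# Deterministic helper RNG plus a factory for synthetic float "audio" used throughout these tests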
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    '''simple docstring'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class TvltFeatureExtractionTester ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self ,parent ,batch_size=7 ,min_seq_length=400 ,max_seq_length=2_000 ,spectrogram_length=2_048 ,feature_size=128 ,num_audio_channels=1 ,hop_length=512 ,chunk_length=30 ,sampling_rate=44_100 ,) -> str:
_UpperCAmelCase : List[str] = parent
_UpperCAmelCase : int = batch_size
_UpperCAmelCase : Optional[Any] = min_seq_length
_UpperCAmelCase : Dict = max_seq_length
_UpperCAmelCase : List[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_UpperCAmelCase : Tuple = spectrogram_length
_UpperCAmelCase : Tuple = feature_size
_UpperCAmelCase : Union[str, Any] = num_audio_channels
_UpperCAmelCase : Optional[int] = hop_length
_UpperCAmelCase : List[str] = chunk_length
_UpperCAmelCase : str = sampling_rate
def _snake_case ( self ) -> List[str]:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def _snake_case ( self ,equal_length=False ,numpify=False ) -> int:
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
if equal_length:
_UpperCAmelCase : Tuple = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_UpperCAmelCase : Dict = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = TvltFeatureExtractor
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : List[Any] = TvltFeatureExtractionTester(self )
def _snake_case ( self ) -> Dict:
_UpperCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(lowerCAmelCase_ ,"""spectrogram_length""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ ,"""feature_size""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ ,"""num_audio_channels""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ ,"""hop_length""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ ,"""chunk_length""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ ,"""sampling_rate""" ) )
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : int = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase : Optional[Any] = feat_extract_first.save_pretrained(lowerCAmelCase_ )[0]
check_json_file_has_correct_format(lowerCAmelCase_ )
_UpperCAmelCase : Optional[Any] = self.feature_extraction_class.from_pretrained(lowerCAmelCase_ )
_UpperCAmelCase : Optional[Any] = feat_extract_first.to_dict()
_UpperCAmelCase : Dict = feat_extract_second.to_dict()
_UpperCAmelCase : Any = dict_first.pop("""mel_filters""" )
_UpperCAmelCase : List[Any] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(lowerCAmelCase_ ,lowerCAmelCase_ ) )
self.assertEqual(lowerCAmelCase_ ,lowerCAmelCase_ )
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase : Union[str, Any] = os.path.join(lowerCAmelCase_ ,"""feat_extract.json""" )
feat_extract_first.to_json_file(lowerCAmelCase_ )
_UpperCAmelCase : Dict = self.feature_extraction_class.from_json_file(lowerCAmelCase_ )
_UpperCAmelCase : List[str] = feat_extract_first.to_dict()
_UpperCAmelCase : Any = feat_extract_second.to_dict()
_UpperCAmelCase : Dict = dict_first.pop("""mel_filters""" )
_UpperCAmelCase : List[Any] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(lowerCAmelCase_ ,lowerCAmelCase_ ) )
self.assertEqual(lowerCAmelCase_ ,lowerCAmelCase_ )
def _snake_case ( self ) -> Dict:
# Initialize feature_extractor
_UpperCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
_UpperCAmelCase : Optional[int] = [np.asarray(lowerCAmelCase_ ) for speech_input in speech_inputs]
# Test not batched input
_UpperCAmelCase : Dict = feature_extractor(np_speech_inputs[0] ,return_tensors="""np""" ,sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_UpperCAmelCase : Optional[Any] = feature_extractor(lowerCAmelCase_ ,return_tensors="""np""" ,sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_UpperCAmelCase : int = feature_extractor(
lowerCAmelCase_ ,return_tensors="""np""" ,sampling_rate=44_100 ,mask_audio=lowerCAmelCase_ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_UpperCAmelCase : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_UpperCAmelCase : Tuple = np.asarray(lowerCAmelCase_ )
_UpperCAmelCase : Optional[int] = feature_extractor(lowerCAmelCase_ ,return_tensors="""np""" ,sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _snake_case ( self ,num_samples ) -> Optional[Any]:
        ds = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" ,"""clean""" ,split="""validation""" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("""id""" ).select(range(num_samples ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
    def _snake_case ( self ) -> int:
        input_speech = self._load_datasamples(1 )
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech ,return_tensors="""pt""" ).audio_values
        self.assertEqual(audio_values.shape ,(1, 1, 192, 128) )
        expected_values = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] ,expected_values ,atol=1E-4 ) )
| 365 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class lowercase :
"""simple docstring"""
    destination_vertex: int
    weight: int
class lowercase :
"""simple docstring"""
    def __init__( self ,size ) -> List[str]:
        self._graph: list[list[Edge]] = [[] for _ in range(size )]
        self._size = size
    def __getitem__( self ,vertex ) -> Iterator[Edge]:
        return iter(self._graph[vertex] )
@property
    def size( self ) -> int:
        return self._size
    def _snake_case ( self ,from_vertex ,to_vertex ,weight ) -> Tuple:
        if weight not in (0, 1):
            raise ValueError("""Edge weight must be either 0 or 1.""" )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("""Vertex indexes must be in [0; size).""" )
        self._graph[from_vertex].append(Edge(to_vertex ,weight ) )
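    # 0-1 BFS: zero-weight edges are pushed to the front of the deque and unit-weight
    # edges to the back, so vertices are popped in nondecreasing distance order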
    def _snake_case ( self ,start_vertex ,finish_vertex ) -> int | None:
        queue = deque([start_vertex] )
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance ,int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError("""No path from start_vertex to finish_vertex.""" )
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
A_ : Dict = r'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(A_ )
class lowercase ( PretrainedConfig ):
"""simple docstring"""
UpperCAmelCase = """rag"""
UpperCAmelCase = True
    def __init__( self ,vocab_size=None ,is_encoder_decoder=True ,prefix=None ,bos_token_id=None ,pad_token_id=None ,eos_token_id=None ,decoder_start_token_id=None ,title_sep=" / " ,doc_sep=" // " ,n_docs=5 ,max_combined_length=300 ,retrieval_vector_size=768 ,retrieval_batch_size=8 ,dataset="wiki_dpr" ,dataset_split="train" ,index_name="compressed" ,index_path=None ,passages_path=None ,use_dummy_dataset=False ,reduce_loss=False ,label_smoothing=0.0 ,do_deduplication=True ,exclude_bos_score=False ,do_marginalize=False ,output_retrieved=False ,use_cache=True ,forced_eos_token_id=None ,**kwargs ,) -> str:
super().__init__(
bos_token_id=SCREAMING_SNAKE_CASE_ ,pad_token_id=SCREAMING_SNAKE_CASE_ ,eos_token_id=SCREAMING_SNAKE_CASE_ ,decoder_start_token_id=SCREAMING_SNAKE_CASE_ ,forced_eos_token_id=SCREAMING_SNAKE_CASE_ ,is_encoder_decoder=SCREAMING_SNAKE_CASE_ ,prefix=SCREAMING_SNAKE_CASE_ ,vocab_size=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ,)
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_UpperCAmelCase : Optional[Any] = kwargs.pop('question_encoder' )
_UpperCAmelCase : Tuple = question_encoder_config.pop('model_type' )
_UpperCAmelCase : List[Any] = kwargs.pop('generator' )
_UpperCAmelCase : List[Any] = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
_UpperCAmelCase : List[str] = AutoConfig.for_model(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase : int = AutoConfig.for_model(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase : int = reduce_loss
_UpperCAmelCase : List[Any] = label_smoothing
_UpperCAmelCase : Optional[Any] = exclude_bos_score
_UpperCAmelCase : Optional[Any] = do_marginalize
_UpperCAmelCase : Union[str, Any] = title_sep
_UpperCAmelCase : int = doc_sep
_UpperCAmelCase : Dict = n_docs
_UpperCAmelCase : List[Any] = max_combined_length
_UpperCAmelCase : Dict = dataset
_UpperCAmelCase : Optional[int] = dataset_split
_UpperCAmelCase : Optional[int] = index_name
_UpperCAmelCase : List[Any] = retrieval_vector_size
_UpperCAmelCase : List[str] = retrieval_batch_size
_UpperCAmelCase : Optional[int] = passages_path
_UpperCAmelCase : Optional[int] = index_path
_UpperCAmelCase : Optional[Any] = use_dummy_dataset
_UpperCAmelCase : Optional[Any] = output_retrieved
_UpperCAmelCase : Tuple = do_deduplication
_UpperCAmelCase : Optional[int] = use_cache
if self.forced_eos_token_id is None:
_UpperCAmelCase : Optional[Any] = getattr(self.generator ,'forced_eos_token_id' ,SCREAMING_SNAKE_CASE_ )
@classmethod
    def _snake_case ( cls ,question_encoder_config ,generator_config ,**kwargs ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() ,generator=generator_config.to_dict() ,**SCREAMING_SNAKE_CASE_ )
def _snake_case ( self ) -> Any:
_UpperCAmelCase : str = copy.deepcopy(self.__dict__ )
_UpperCAmelCase : Optional[int] = self.question_encoder.to_dict()
_UpperCAmelCase : Tuple = self.generator.to_dict()
_UpperCAmelCase : Any = self.__class__.model_type
return output
| 366 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A_ : Any = 1_6
A_ : Union[str, Any] = 3_2
def get_fold_dataloaders( accelerator , dataset , train_idxs , valid_idxs , batch_size = 16 ):
'''simple docstring'''
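    # Build train/validation dataloaders for one cross-validation fold, plus a loader for the shared test split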
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = DatasetDict(
{
"""train""": dataset["""train"""].select(lowerCAmelCase_ ),
"""validation""": dataset["""train"""].select(lowerCAmelCase_ ),
"""test""": dataset["""validation"""],
} )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    test_dataloader = DataLoader(
        tokenized_datasets["""test"""] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
return train_dataloader, eval_dataloader, test_dataloader
def training_function( config , args ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = []
# Download the dataset
_UpperCAmelCase : Dict = load_dataset("""glue""" , """mrpc""" )
# Create our splits
_UpperCAmelCase : Optional[Any] = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
_UpperCAmelCase : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase : Dict = config["""lr"""]
_UpperCAmelCase : List[Any] = int(config["""num_epochs"""] )
_UpperCAmelCase : str = int(config["""seed"""] )
_UpperCAmelCase : List[Any] = int(config["""batch_size"""] )
_UpperCAmelCase : int = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
_UpperCAmelCase : List[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_UpperCAmelCase : Dict = batch_size // MAX_GPU_BATCH_SIZE
_UpperCAmelCase : Tuple = MAX_GPU_BATCH_SIZE
set_seed(lowerCAmelCase_ )
# New Code #
# Create our folds:
_UpperCAmelCase : Any = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] )
_UpperCAmelCase : Tuple = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(lowerCAmelCase_ ):
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Union[str, Any] = get_fold_dataloaders(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCAmelCase : Tuple = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCAmelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_UpperCAmelCase : List[Any] = model.to(accelerator.device )
# Instantiate optimizer
_UpperCAmelCase : int = AdamW(params=model.parameters() , lr=lowerCAmelCase_ )
# Instantiate scheduler
_UpperCAmelCase : Dict = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase_ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Any = accelerator.prepare(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Now we train the model
for epoch in range(lowerCAmelCase_ ):
model.train()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_UpperCAmelCase : Union[str, Any] = model(**lowerCAmelCase_ )
_UpperCAmelCase : Dict = outputs.loss
_UpperCAmelCase : int = loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCAmelCase : List[str] = model(**lowerCAmelCase_ )
_UpperCAmelCase : List[Any] = outputs.logits.argmax(dim=-1 )
_UpperCAmelCase ,_UpperCAmelCase : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowerCAmelCase_ , references=lowerCAmelCase_ , )
_UpperCAmelCase : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , lowerCAmelCase_ )
# New Code #
# We also run predictions on the test set at the very end
_UpperCAmelCase : Tuple = []
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**lowerCAmelCase_ )
_UpperCAmelCase : Any = outputs.logits
_UpperCAmelCase ,_UpperCAmelCase : List[Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(lowerCAmelCase_ , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
_UpperCAmelCase : List[Any] = torch.cat(lowerCAmelCase_ , dim=0 )
_UpperCAmelCase : Union[str, Any] = torch.stack(lowerCAmelCase_ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
_UpperCAmelCase : List[str] = metric.compute(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )
accelerator.print("""Average test metrics from all folds:""" , lowerCAmelCase_ )
def snake_case_ ( )-> Any:
'''simple docstring'''
_UpperCAmelCase : List[str] = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowerCAmelCase_ , default=lowerCAmelCase_ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
# New Code #
parser.add_argument("""--num_folds""" , type=lowerCAmelCase_ , default=3 , help="""The number of splits to perform across the dataset""" )
_UpperCAmelCase : Optional[int] = parser.parse_args()
_UpperCAmelCase : Tuple = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowerCAmelCase_ , lowerCAmelCase_ )
if __name__ == "__main__":
main()
| 349 | 0 |
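The final fold-averaging step above is the heart of the k-fold ensemble: the logits from each fold's model are stacked, summed, averaged, and only then reduced with argmax. A minimal, self-contained sketch of that reduction with toy tensors (illustrative only, not part of the training script):

import torch

# Hypothetical logits from 3 folds over 4 test examples with 2 classes.
fold_logits = [torch.randn(4, 2) for _ in range(3)]
# Stack to (num_folds, num_examples, num_classes), sum over folds, divide by
# the fold count, then pick the highest-scoring class per example.
ensembled = torch.stack(fold_logits, dim=0).sum(dim=0).div(3).argmax(dim=-1)
print(ensembled.shape)  # torch.Size([4])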
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    """configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_longt5"""] = [
        """LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """LongT5EncoderModel""",
        """LongT5ForConditionalGeneration""",
        """LongT5Model""",
        """LongT5PreTrainedModel""",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_longt5"""] = [
        """FlaxLongT5ForConditionalGeneration""",
        """FlaxLongT5Model""",
        """FlaxLongT5PreTrainedModel""",
    ]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 367 |
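For reference, `_LazyModule` defers the heavy framework imports above until a name is first accessed. A rough standalone sketch of the same idea (simplified; the real implementation in `transformers` also handles module specs and direct submodule access):

import importlib
import types


class LazyModule(types.ModuleType):
    """Load the real submodule only when one of its exported names is requested."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map: exported name -> submodule that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(module, attr)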
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class lowercase ( BaseTransformer ):
"""simple docstring"""
    mode = """sequence-classification"""

    def __init__( self ,hparams ) -> Dict:
        if type(hparams ) == dict:
            hparams = Namespace(**hparams )
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams ,num_labels ,self.mode )
def _snake_case ( self ,**a_ ) -> Optional[Any]:
return self.model(**a_ )
def _snake_case ( self ,a_ ,a_ ) -> Optional[Any]:
_UpperCAmelCase : Optional[Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_UpperCAmelCase : Any = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
_UpperCAmelCase : Any = self(**a_ )
_UpperCAmelCase : int = outputs[0]
_UpperCAmelCase : Any = self.trainer.lr_schedulers[0]["""scheduler"""]
_UpperCAmelCase : Any = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _snake_case ( self ) -> int:
_UpperCAmelCase : Optional[int] = self.hparams
_UpperCAmelCase : int = processors[args.task]()
_UpperCAmelCase : str = processor.get_labels()
for mode in ["train", "dev"]:
_UpperCAmelCase : Tuple = self._feature_file(a_ )
if os.path.exists(a_ ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" ,a_ )
else:
logger.info("""Creating features from dataset file at %s""" ,args.data_dir )
_UpperCAmelCase : List[Any] = (
processor.get_dev_examples(args.data_dir )
if mode == """dev"""
else processor.get_train_examples(args.data_dir )
)
_UpperCAmelCase : Union[str, Any] = convert_examples_to_features(
a_ ,self.tokenizer ,max_length=args.max_seq_length ,label_list=self.labels ,output_mode=args.glue_output_mode ,)
logger.info("""Saving features into cached file %s""" ,a_ )
torch.save(a_ ,a_ )
def _snake_case ( self ,a_ ,a_ ,a_ = False ) -> DataLoader:
_UpperCAmelCase : Union[str, Any] = """dev""" if mode == """test""" else mode
_UpperCAmelCase : Tuple = self._feature_file(a_ )
logger.info("""Loading features from cached file %s""" ,a_ )
_UpperCAmelCase : Union[str, Any] = torch.load(a_ )
_UpperCAmelCase : List[str] = torch.tensor([f.input_ids for f in features] ,dtype=torch.long )
_UpperCAmelCase : Tuple = torch.tensor([f.attention_mask for f in features] ,dtype=torch.long )
_UpperCAmelCase : str = torch.tensor([f.token_type_ids for f in features] ,dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_UpperCAmelCase : Optional[int] = torch.tensor([f.label for f in features] ,dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_UpperCAmelCase : str = torch.tensor([f.label for f in features] ,dtype=torch.float )
return DataLoader(
TensorDataset(a_ ,a_ ,a_ ,a_ ) ,batch_size=a_ ,shuffle=a_ ,)
def _snake_case ( self ,a_ ,a_ ) -> Any:
_UpperCAmelCase : Any = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_UpperCAmelCase : int = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
_UpperCAmelCase : List[str] = self(**a_ )
_UpperCAmelCase ,_UpperCAmelCase : Optional[int] = outputs[:2]
_UpperCAmelCase : List[str] = logits.detach().cpu().numpy()
_UpperCAmelCase : Union[str, Any] = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _snake_case ( self ,a_ ) -> tuple:
_UpperCAmelCase : Optional[int] = torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item()
_UpperCAmelCase : Any = np.concatenate([x["""pred"""] for x in outputs] ,axis=0 )
if self.hparams.glue_output_mode == "classification":
_UpperCAmelCase : int = np.argmax(a_ ,axis=1 )
elif self.hparams.glue_output_mode == "regression":
_UpperCAmelCase : Union[str, Any] = np.squeeze(a_ )
_UpperCAmelCase : str = np.concatenate([x["""target"""] for x in outputs] ,axis=0 )
_UpperCAmelCase : Tuple = [[] for _ in range(out_label_ids.shape[0] )]
_UpperCAmelCase : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
_UpperCAmelCase : Optional[int] = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task ,a_ ,a_ )}
_UpperCAmelCase : Dict = dict(results.items() )
_UpperCAmelCase : Any = results
return ret, preds_list, out_label_list
def _snake_case ( self ,a_ ) -> dict:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Dict = self._eval_end(a_ )
_UpperCAmelCase : List[Any] = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _snake_case ( self ,a_ ) -> dict:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : str = self._eval_end(a_ )
_UpperCAmelCase : List[Any] = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _snake_case ( a_ ,a_ ) -> Any:
BaseTransformer.add_model_specific_args(a_ ,a_ )
parser.add_argument(
"""--max_seq_length""" ,default=128 ,type=a_ ,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) ,)
parser.add_argument(
"""--task""" ,default="""""" ,type=a_ ,required=a_ ,help="""The GLUE task to run""" ,)
parser.add_argument(
"""--gpus""" ,default=0 ,type=a_ ,help="""The number of GPUs allocated for this, it is by default 0 meaning none""" ,)
parser.add_argument(
"""--overwrite_cache""" ,action="""store_true""" ,help="""Overwrite the cached training and evaluation sets""" )
return parser
def main()-> Tuple:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    add_generic_args(parser , os.getcwd() )
    parser = GLUETransformer.add_model_specific_args(parser , os.getcwd() )
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            """./results""" , f'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
        os.makedirs(args.output_dir )
    model = GLUETransformer(args )
    trainer = generic_train(model , args )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=True ) )
        model = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(model )
if __name__ == "__main__":
main()
| 349 | 0 |
'''simple docstring'''
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode( word )-> str:
    '''simple docstring'''
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("""encode() accepts only letters of the alphabet and spaces""" )
    return encoded
def decode( coded )-> str:
    '''simple docstring'''
    if set(coded ) - {"A", "B", " "} != set():
        raise Exception("""decode() accepts only 'A', 'B' and spaces""" )
    decoded = ""
    for word in coded.split():
        while len(word ) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 368 |
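As a sanity check, the two cipher functions above invert each other on alphabetic input; a short usage sketch:

coded = encode("hello world")
# Ten letters map to five symbols each and the space is kept as-is,
# so the coded string is 51 characters long.
assert decode(coded) == "hello world"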
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : Union[str, Any] = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowercase ( PretrainedConfig ):
"""simple docstring"""
    model_type = """roformer"""

    def __init__( self ,vocab_size=50_000 ,embedding_size=None ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3_072 ,hidden_act="""gelu""" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=1_536 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,pad_token_id=0 ,rotary_value=False ,use_cache=True ,**kwargs ,) -> Tuple:
        super().__init__(pad_token_id=pad_token_id ,**kwargs )

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class lowercase ( OnnxConfig ):
"""simple docstring"""
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
                ("""token_type_ids""", dynamic_axis),
            ] )
| 349 | 0 |
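A brief usage sketch for the configuration above, using the upstream names `RoFormerConfig`/`RoFormerOnnxConfig` that these classes correspond to (the checkpoint-free construction keeps it offline):

config = RoFormerConfig(num_hidden_layers=6, rotary_value=True)
print(config.embedding_size == config.hidden_size)  # True when embedding_size is left as None

onnx_config = RoFormerOnnxConfig(config, task="default")
print(list(onnx_config.inputs))  # ['input_ids', 'attention_mask', 'token_type_ids']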
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase ( object ):
"""simple docstring"""
def __init__( self ,a_ ,a_=13 ,a_=7 ,a_=True ,a_=True ,a_=True ,a_=True ,a_=99 ,a_=32 ,a_=5 ,a_=4 ,a_=37 ,a_="gelu" ,a_=0.1 ,a_=0.1 ,a_=512 ,a_=16 ,a_=2 ,a_=0.02 ,a_=False ,a_=True ,a_="None" ,a_=3 ,a_=4 ,a_=None ,) -> Dict:
_UpperCAmelCase : str = parent
_UpperCAmelCase : Union[str, Any] = batch_size
_UpperCAmelCase : List[str] = seq_length
_UpperCAmelCase : Any = is_training
_UpperCAmelCase : Any = use_input_mask
_UpperCAmelCase : Optional[Any] = use_token_type_ids
_UpperCAmelCase : Union[str, Any] = use_labels
_UpperCAmelCase : str = vocab_size
_UpperCAmelCase : str = hidden_size
_UpperCAmelCase : Any = num_hidden_layers
_UpperCAmelCase : Union[str, Any] = num_attention_heads
_UpperCAmelCase : Tuple = intermediate_size
_UpperCAmelCase : Union[str, Any] = hidden_act
_UpperCAmelCase : int = hidden_dropout_prob
_UpperCAmelCase : List[Any] = attention_probs_dropout_prob
_UpperCAmelCase : Union[str, Any] = max_position_embeddings
_UpperCAmelCase : Tuple = type_vocab_size
_UpperCAmelCase : Union[str, Any] = type_sequence_label_size
_UpperCAmelCase : int = initializer_range
_UpperCAmelCase : List[Any] = num_labels
_UpperCAmelCase : Dict = num_choices
_UpperCAmelCase : List[Any] = relative_attention
_UpperCAmelCase : Optional[int] = position_biased_input
_UpperCAmelCase : Optional[Any] = pos_att_type
_UpperCAmelCase : Any = scope
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_UpperCAmelCase : Union[str, Any] = None
if self.use_input_mask:
_UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
_UpperCAmelCase : Any = None
if self.use_token_type_ids:
_UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : Dict = None
if self.use_labels:
_UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_UpperCAmelCase : Dict = ids_tensor([self.batch_size] ,self.num_choices )
_UpperCAmelCase : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self ) -> Optional[int]:
return DebertaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,relative_attention=self.relative_attention ,position_biased_input=self.position_biased_input ,pos_att_type=self.pos_att_type ,)
def _snake_case ( self ) -> str:
_UpperCAmelCase : Any = self.get_config()
_UpperCAmelCase : Optional[Any] = 300
return config
def _snake_case ( self ,a_ ) -> str:
self.parent.assertListEqual(list(result.loss.size() ) ,[] )
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ) -> str:
_UpperCAmelCase : Union[str, Any] = DebertaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_UpperCAmelCase : Dict = model(UpperCamelCase__ ,attention_mask=UpperCamelCase__ ,token_type_ids=UpperCamelCase__ )[0]
_UpperCAmelCase : List[Any] = model(UpperCamelCase__ ,token_type_ids=UpperCamelCase__ )[0]
_UpperCAmelCase : Any = model(UpperCamelCase__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) ,[self.batch_size, self.seq_length, self.hidden_size] )
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ) -> Optional[Any]:
_UpperCAmelCase : str = DebertaForMaskedLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_UpperCAmelCase : Optional[Any] = model(UpperCamelCase__ ,attention_mask=UpperCamelCase__ ,token_type_ids=UpperCamelCase__ ,labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ) -> str:
_UpperCAmelCase : Tuple = self.num_labels
_UpperCAmelCase : str = DebertaForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_UpperCAmelCase : Any = model(UpperCamelCase__ ,attention_mask=UpperCamelCase__ ,token_type_ids=UpperCamelCase__ ,labels=UpperCamelCase__ )
self.parent.assertListEqual(list(result.logits.size() ) ,[self.batch_size, self.num_labels] )
self.check_loss_output(UpperCamelCase__ )
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ) -> Dict:
_UpperCAmelCase : int = self.num_labels
_UpperCAmelCase : Any = DebertaForTokenClassification(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_UpperCAmelCase : Optional[Any] = model(UpperCamelCase__ ,attention_mask=UpperCamelCase__ ,token_type_ids=UpperCamelCase__ ,labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ) -> int:
_UpperCAmelCase : Optional[Any] = DebertaForQuestionAnswering(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_UpperCAmelCase : List[str] = model(
UpperCamelCase__ ,attention_mask=UpperCamelCase__ ,token_type_ids=UpperCamelCase__ ,start_positions=UpperCamelCase__ ,end_positions=UpperCamelCase__ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
    def _snake_case ( self ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class lowercase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Tuple = DebertaModelTester(self )
_UpperCAmelCase : List[str] = ConfigTester(self ,config_class=UpperCamelCase__ ,hidden_size=37 )
def _snake_case ( self ) -> List[str]:
self.config_tester.run_common_tests()
def _snake_case ( self ) -> Tuple:
_UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*UpperCamelCase__ )
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*UpperCamelCase__ )
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*UpperCamelCase__ )
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*UpperCamelCase__ )
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*UpperCamelCase__ )
@slow
def _snake_case ( self ) -> Optional[int]:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Tuple = DebertaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="""Model not available yet""" )
def _snake_case ( self ) -> Tuple:
pass
@slow
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : int = DebertaModel.from_pretrained("""microsoft/deberta-base""" )
_UpperCAmelCase : List[str] = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
_UpperCAmelCase : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(UpperCamelCase__ ,attention_mask=UpperCamelCase__ )[0]
# compare the actual values for a slice.
_UpperCAmelCase : Dict = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,UpperCamelCase__ ,atol=1E-4 ) ,f'''{output[:, 1:4, 1:4]}''' )
| 369 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowercase ( TestCasePlus ):
"""simple docstring"""
@slow
@require_torch
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" ,"""prajjwal1/bert-tiny""" )
_UpperCAmelCase : List[Any] = BertTokenizer.from_pretrained("""bert-base-uncased""" )
_UpperCAmelCase : List[Any] = bertabert.config.encoder.vocab_size
_UpperCAmelCase : Optional[int] = tokenizer.sep_token_id
_UpperCAmelCase : Union[str, Any] = tokenizer.cls_token_id
_UpperCAmelCase : str = 128
_UpperCAmelCase : List[str] = datasets.load_dataset("""cnn_dailymail""" ,"""3.0.0""" ,split="""train[:1%]""" )
_UpperCAmelCase : Union[str, Any] = datasets.load_dataset("""cnn_dailymail""" ,"""3.0.0""" ,split="""validation[:1%]""" )
_UpperCAmelCase : Any = train_dataset.select(range(32 ) )
_UpperCAmelCase : Any = val_dataset.select(range(16 ) )
_UpperCAmelCase : List[Any] = 4
def _map_to_encoder_decoder_inputs(a_ ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_UpperCAmelCase : int = tokenizer(batch["""article"""] ,padding="""max_length""" ,truncation=a_ ,max_length=512 )
_UpperCAmelCase : Tuple = tokenizer(batch["""highlights"""] ,padding="""max_length""" ,truncation=a_ ,max_length=128 )
_UpperCAmelCase : int = inputs.input_ids
_UpperCAmelCase : Union[str, Any] = inputs.attention_mask
_UpperCAmelCase : Union[str, Any] = outputs.input_ids
_UpperCAmelCase : Dict = outputs.input_ids.copy()
_UpperCAmelCase : Dict = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
_UpperCAmelCase : Optional[int] = outputs.attention_mask
assert all(len(a_ ) == 512 for x in inputs.input_ids )
assert all(len(a_ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(a_ ):
_UpperCAmelCase : Optional[int] = pred.label_ids
_UpperCAmelCase : Optional[int] = pred.predictions
# all unnecessary tokens are removed
_UpperCAmelCase : Union[str, Any] = tokenizer.batch_decode(a_ ,skip_special_tokens=a_ )
_UpperCAmelCase : str = tokenizer.batch_decode(a_ ,skip_special_tokens=a_ )
_UpperCAmelCase : Tuple = sum([int(pred_str[i] == label_str[i] ) for i in range(len(a_ ) )] ) / len(a_ )
return {"accuracy": accuracy}
# map train dataset
_UpperCAmelCase : Union[str, Any] = train_dataset.map(
_map_to_encoder_decoder_inputs ,batched=a_ ,batch_size=a_ ,remove_columns=["""article""", """highlights"""] ,)
train_dataset.set_format(
type="""torch""" ,columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] ,)
# same for validation dataset
_UpperCAmelCase : List[str] = val_dataset.map(
_map_to_encoder_decoder_inputs ,batched=a_ ,batch_size=a_ ,remove_columns=["""article""", """highlights"""] ,)
val_dataset.set_format(
type="""torch""" ,columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] ,)
_UpperCAmelCase : Optional[int] = self.get_auto_remove_tmp_dir()
        _UpperCAmelCase : List[str] = Seq2SeqTrainingArguments(
output_dir=a_ ,per_device_train_batch_size=a_ ,per_device_eval_batch_size=a_ ,predict_with_generate=a_ ,evaluation_strategy="""steps""" ,do_train=a_ ,do_eval=a_ ,warmup_steps=0 ,eval_steps=2 ,logging_steps=2 ,)
# instantiate trainer
        _UpperCAmelCase : int = Seq2SeqTrainer(
model=a_ ,args=a_ ,compute_metrics=_compute_metrics ,train_dataset=a_ ,eval_dataset=a_ ,tokenizer=a_ ,)
# start training
trainer.train()
| 349 | 0 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param( torch_layer , weight , bias=None )-> Dict:
    '''simple docstring'''
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
    torch_layer.weight = nn.Parameter(weight )
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
        torch_layer.bias = nn.Parameter(bias )
def set_layer_weights_in_torch_lsh( weights , torch_layer , hidden_size )-> Optional[int]:
    '''simple docstring'''
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0] )
    np_value = np.asarray(weights[1] )
    np_dense = np.asarray(weights[2] )
    set_param(
        torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_layer_weights_in_torch_local( weights , torch_layer , hidden_size )-> List[str]:
    '''simple docstring'''
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0] )
    np_key = np.asarray(weights[1] )
    np_value = np.asarray(weights[2] )
    np_dense = np.asarray(weights[3] )
    set_param(
        torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_block_weights_in_torch( weights , torch_block , hidden_size )-> Optional[Any]:
    '''simple docstring'''
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0] )
    layer_norm_1_bias = np.asarray(layer_norm_1[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(layer_norm_1_weight ) , torch.tensor(layer_norm_1_bias ) , )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights ) < 4:
        set_layer_weights_in_torch_lsh(attn_weights , torch_block.attention , hidden_size )
    else:
        set_layer_weights_in_torch_local(attn_weights , torch_block.attention , hidden_size )
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights ) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0] )
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(layer_norm_2_weight ) , torch.tensor(layer_norm_2_bias ) , )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0] )
    inter_dense_bias = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(inter_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(inter_dense_bias ) , )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0] )
    out_dense_bias = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(out_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(out_dense_bias ) , )
def set_model_weights_in_torch( weights , torch_model , hidden_size )-> List[Any]:
    '''simple docstring'''
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings , torch.tensor(word_embeddings ) , )
    if isinstance(weights[3] , tuple ):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            emb_weights = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f'''{position_embeddings[emb_idx]} emb does not match'''
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights ) )
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        trax_layer_weights ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights , layer , hidden_size )
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0] )
    layer_norm_out_bias = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm , torch.tensor(layer_norm_out_weight ) , torch.tensor(layer_norm_out_bias ) , )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0] )
    output_embed_bias = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder , torch.tensor(output_embed_weights ).transpose(0 , 1 ).contiguous() , torch.tensor(output_embed_bias ) , )
def convert_trax_checkpoint_to_pytorch( trax_model_pkl_path , config_file , pytorch_dump_path )-> int:
    '''simple docstring'''
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    model = ReformerModelWithLMHead(config )

    with open(trax_model_pkl_path , """rb""" ) as f:
        model_weights = pickle.load(f )["""weights"""]

    set_model_weights_in_torch(model_weights , model , config.hidden_size )

    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
A_ : Any = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 370 |
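The conversion above rests on one invariant: each Trax array must match the shape of the PyTorch parameter it replaces before being wrapped in `nn.Parameter`. A minimal illustration of that guard with a plain `nn.Linear` (toy shapes, not Reformer's):

import numpy as np
import torch
from torch import nn

layer = nn.Linear(4, 3)
new_weight = np.ones((3, 4), dtype=np.float32)  # must match layer.weight.shape, i.e. (out, in)
assert layer.weight.shape == torch.Size(new_weight.shape)
layer.weight = nn.Parameter(torch.tensor(new_weight))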
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137
def lamberts_ellipsoidal_distance( lat1 , lon1 , lat2 , lon2 )-> float:
    '''simple docstring'''
    # Equation Parameters
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    b_lat2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1 , lon1 , lat2 , lon2 ) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value ) ** 2) * (cos(q_value ) ** 2)
    x_denominator = cos(sigma / 2 ) ** 2
    x_value = (sigma - sin(sigma )) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value ) ** 2) * (sin(q_value ) ** 2)
    y_denominator = sin(sigma / 2 ) ** 2
    y_value = (sigma + sin(sigma )) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 | 0 |
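Usage sketch for the function above (the coordinates are illustrative; `haversine_distance` must be importable as in the module header):

# San Francisco (37.774856, -122.424227) to Yosemite (37.864742, -119.537521);
# Lambert's flattening correction shifts the plain haversine estimate by a
# few hundred metres over this ~254 km route.
print(lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521))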
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class lowercase ( PipelineTool ):
    """simple docstring"""

    default_checkpoint = """philschmid/bart-large-cnn-samsum"""
    description = (
        """This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """
        """and returns a summary of the text."""
    )
    name = """summarizer"""
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["""text"""]
    outputs = ["""text"""]
    def encode( self ,text ) -> Dict:
        return self.pre_processor(text ,return_tensors="""pt""" ,truncation=True )

    def forward( self ,inputs ) -> Optional[Any]:
        return self.model.generate(**inputs )[0]

    def decode( self ,outputs ) -> Optional[int]:
        return self.pre_processor.decode(outputs ,skip_special_tokens=True ,clean_up_tokenization_spaces=True )
| 371 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area( fnc , x_start , x_end , steps = 100 , )-> float:
    '''simple docstring'''
    x1 = x_start
    fx1 = fnc(x_start )
    area = 0.0
    for _ in range(steps ):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2 )
        area += abs(fx2 + fx1 ) * (x2 - x1 ) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":
    def f( x )-> Any:
        '''simple docstring'''
        return x**3 + x**2
print("""f(x) = x^3 + x^2""")
print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
    i = 1_0
while i <= 1_0_0_0_0_0:
print(f"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 1_0
| 349 | 0 |
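The trapezoidal estimate converges quadratically in the step count; a quick check against the exact value, since ∫ from -5 to 5 of (x^3 + x^2) dx = 250/3 ≈ 83.33 (the odd x^3 term cancels over the symmetric interval):

exact = 250 / 3
for steps in (10, 100, 1000):
    approx = trapezoidal_area(lambda x: x**3 + x**2, -5, 5, steps)
    print(steps, abs(approx - exact))  # error shrinks roughly 100x per 10x more steps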
'''simple docstring'''
import numpy as np
def sigmoid( vector )-> np.ndarray:
    '''simple docstring'''
    return 1 / (1 + np.exp(-vector ))


def sigmoid_linear_unit( vector )-> np.ndarray:
    '''simple docstring'''
    return vector * sigmoid(1.702 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args()-> int:
    '''simple docstring'''
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
        ) )

    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""" , type=int , default=1 , help="""Number of TPU cores to use (1 or 8).""" )

    # positional
    parser.add_argument(
        """training_script""" , type=str , help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ) , )

    # rest from the training program
    parser.add_argument("""training_script_args""" , nargs=REMAINDER )
    return parser.parse_args()
def main()-> str:
    '''simple docstring'''
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]

    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 349 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
    def _snake_case ( self ) -> str:
        model = FlaxXLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
        tokenizer = AutoTokenizer.from_pretrained("""xlm-roberta-base""" )
        text = """The dog is cute and lives in the garden house"""
        input_ids = jnp.array([tokenizer.encode(text )] )
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        output = model(input_ids )["""last_hidden_state"""]
        self.assertEqual(output.shape ,expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] ,expected_output_values_last_dim ,atol=1E-3 ) )
| 351 |
'''simple docstring'''
def remove_digit( num )-> int:
    '''simple docstring'''
    if not isinstance(num , int ):
        raise TypeError("""only integers accepted as input""" )
    else:
        num_str = str(abs(num ) )
        num_transpositions = [list(num_str ) for char in range(len(num_str ) )]
        for index in range(len(num_str ) ):
            num_transpositions[index].pop(index )
        return max(
            int("".join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 349 | 0 |
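Usage sketch for the function above:

assert remove_digit(152) == 52   # candidates after dropping one digit: 52, 12, 15
assert remove_digit(-290) == 90  # sign is dropped via abs(); candidates: 90, 20, 29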
'''simple docstring'''
def infix_2_postfix( infix )-> str:
    '''simple docstring'''
    stack = []
    post_fix = []
    priority = {
        """^""": 3,
        """*""": 2,
        """/""": 2,
        """%""": 2,
        """+""": 1,
        """-""": 1,
    }  # Priority of each operator
    print_width = len(infix ) if (len(infix ) > 7) else 7

    # Print table header for output
    print(
        """Symbol""".center(8 ) , """Stack""".center(print_width ) , """Postfix""".center(print_width ) , sep=""" | """ , )
    print("""-""" * (print_width * 3 + 7) )

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x )  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x )  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop() )  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack ) == 0:
                stack.append(x )  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack ) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop() )  # pop stack & add to Postfix
                stack.append(x )  # push x to stack
        print(
            x.center(8 ) , ("".join(stack )).ljust(print_width ) , ("".join(post_fix )).ljust(print_width ) , sep=""" | """ , )  # Output in tabular format

    while len(stack ) > 0:  # while stack is not empty
        post_fix.append(stack.pop() )  # pop stack & add to Postfix
        print(
            """ """.center(8 ) , ("".join(stack )).ljust(print_width ) , ("".join(post_fix )).ljust(print_width ) , sep=""" | """ , )  # Output in tabular format

    return "".join(post_fix )  # return Postfix as str


def infix_2_prefix( infix )-> str:
    '''simple docstring'''
    infix = list(infix[::-1] )  # reverse the infix equation

    for i in range(len(infix ) ):
        if infix[i] == "(":
            infix[i] = """)"""  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = """("""  # change ")" to "("

    return (infix_2_postfix("".join(infix ) ))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input("""\nEnter an Infix Equation = """)  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
print("""\n\t""", Infix, """(Infix) -> """, infix_2_prefix(Infix), """(Prefix)""")
| 352 |
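Usage sketch for the conversion above (the expression is the classic textbook example; the call also prints the step-by-step trace table on the way):

expression = "a+b*(c^d-e)^(f+g*h)-i"
print(infix_2_prefix(expression))  # prints the trace, then the prefix form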
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers( src_layers , dest_layers , layers_to_copy )-> None:
    '''simple docstring'''
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
    assert len(dest_layers ) == len(layers_to_copy ), f'''{len(dest_layers )} != {len(layers_to_copy )}'''
    dest_layers.load_state_dict(layers_to_copy.state_dict() )
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
1_2: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 1_1],
4: [0, 4, 8, 1_1],
6: [0, 2, 4, 7, 9, 1_1],
9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1],
1_2: list(range(1_2)),
},
1_6: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 1_5],
3: [0, 8, 1_5],
4: [0, 5, 1_0, 1_5],
6: [0, 3, 6, 9, 1_2, 1_5],
8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5],
9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5],
1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5],
1_6: list(range(1_6)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]},
1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]},
}
def pick_layers_to_copy( n_student , n_teacher )-> Union[str, Any]:
    '''simple docstring'''
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f'''no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'''
                f''' {n_student}''' )
        return list(range(n_student ) )
def get_layers_to_supervise( n_student , n_teacher )-> List[int]:
    '''simple docstring'''
    if n_student > n_teacher:
        raise ValueError(f'''Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}''' )
    elif n_teacher == n_student:
        return list(range(n_teacher ) )
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ = "student" , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_=False , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , )-> Tuple[PreTrainedModel, List[int], List[int]]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = """encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."""
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
AutoTokenizer.from_pretrained(lowerCAmelCase_ ).save_pretrained(lowerCAmelCase_ ) # purely for convenience
_UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase_ ).eval()
else:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), F'''teacher must be a model or string got type {type(lowerCAmelCase_ )}'''
_UpperCAmelCase : str = teacher.config.to_diff_dict()
try:
_UpperCAmelCase ,_UpperCAmelCase : Optional[int] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
_UpperCAmelCase : Tuple = teacher_e
if d is None:
_UpperCAmelCase : Dict = teacher_d
init_kwargs.update({"""encoder_layers""": e, """decoder_layers""": d} )
except AttributeError: # T5
if hasattr(teacher.config , """num_encoder_layers""" ):
_UpperCAmelCase ,_UpperCAmelCase : int = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
_UpperCAmelCase ,_UpperCAmelCase : int = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
_UpperCAmelCase : List[str] = teacher_e
if d is None:
_UpperCAmelCase : str = teacher_d
if hasattr(teacher.config , """num_encoder_layers""" ):
init_kwargs.update({"""num_encoder_layers""": e, """num_decoder_layers""": d} )
else:
init_kwargs.update({"""num_layers""": e, """num_decoder_layers""": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCAmelCase_ )
# Copy weights
_UpperCAmelCase : Any = teacher.config_class(**lowerCAmelCase_ )
_UpperCAmelCase : Optional[Any] = AutoModelForSeqaSeqLM.from_config(lowerCAmelCase_ )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
_UpperCAmelCase : Optional[Any] = student.load_state_dict(teacher.state_dict() , strict=lowerCAmelCase_ )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
_UpperCAmelCase ,_UpperCAmelCase : Optional[Any] = list(range(lowerCAmelCase_ ) ), list(range(lowerCAmelCase_ ) )
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'''
F''' {save_path}''' )
student.save_pretrained(lowerCAmelCase_ )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
_UpperCAmelCase : List[int] = pick_layers_to_copy(lowerCAmelCase_ , lowerCAmelCase_ )
if d_layers_to_copy is None:
_UpperCAmelCase : List[int] = pick_layers_to_copy(lowerCAmelCase_ , lowerCAmelCase_ )
try:
if hasattr(
lowerCAmelCase_ , """prophetnet""" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCAmelCase_ )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCAmelCase_ )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCAmelCase_ )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCAmelCase_ )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCAmelCase_ )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCAmelCase_ )
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''' )
_UpperCAmelCase : Dict = {
"""teacher_type""": teacher.config.model_type,
"""copied_encoder_layers""": e_layers_to_copy,
"""copied_decoder_layers""": d_layers_to_copy,
}
student.save_pretrained(lowerCAmelCase_ )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 349 | 0 |
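A small sketch of how the layer-picking tables above drive a distillation call (the checkpoint id is illustrative, and the construction line is left commented since it downloads weights):

# Which BART teacher layers a 3-layer student inherits:
print(pick_layers_to_copy(n_student=3, n_teacher=12))  # [0, 6, 11]

# Build a 12-encoder / 3-decoder student from a hypothetical BART checkpoint:
# student, e_copied, d_copied = create_student_by_copying_alternating_layers(
#     "facebook/bart-large-cnn", save_path="student_12_3", e=12, d=3
# )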
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
A_ : Dict = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"""
),
"""squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""",
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""squeezebert/squeezebert-uncased""": 5_1_2,
"""squeezebert/squeezebert-mnli""": 5_1_2,
"""squeezebert/squeezebert-mnli-headless""": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"""squeezebert/squeezebert-uncased""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True},
}
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = SqueezeBertTokenizer
def __init__( self ,a_=None ,a_=None ,a_=True ,a_="[UNK]" ,a_="[SEP]" ,a_="[PAD]" ,a_="[CLS]" ,a_="[MASK]" ,a_=True ,a_=None ,**a_ ,) -> Optional[int]:
super().__init__(
a_ ,tokenizer_file=a_ ,do_lower_case=a_ ,unk_token=a_ ,sep_token=a_ ,pad_token=a_ ,cls_token=a_ ,mask_token=a_ ,tokenize_chinese_chars=a_ ,strip_accents=a_ ,**a_ ,)
_UpperCAmelCase : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" ,a_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" ,a_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" ,a_ ) != tokenize_chinese_chars
):
_UpperCAmelCase : List[str] = getattr(a_ ,normalizer_state.pop("""type""" ) )
_UpperCAmelCase : Dict = do_lower_case
_UpperCAmelCase : Optional[int] = strip_accents
_UpperCAmelCase : Dict = tokenize_chinese_chars
_UpperCAmelCase : int = normalizer_class(**a_ )
_UpperCAmelCase : Union[str, Any] = do_lower_case
def _snake_case ( self ,a_ ,a_=None ) -> str:
_UpperCAmelCase : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _snake_case ( self ,a_ ,a_ = None ) -> List[int]:
_UpperCAmelCase : str = [self.sep_token_id]
_UpperCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _snake_case ( self ,a_ ,a_ = None ) -> Tuple[str]:
_UpperCAmelCase : List[str] = self._tokenizer.model.save(a_ ,name=a_ )
return tuple(a_ )
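# Hedged usage sketch (added; assumes the class above under its upstream name
# SqueezeBertTokenizerFast, and that "hello"/"world" are single wordpieces in
# its vocab):
# tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
# tok("hello", "world")["token_type_ids"]  # -> [0, 0, 0, 1, 1]: 0s over "[CLS] A [SEP]", 1s over "B [SEP]"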
| 353 |
'''simple docstring'''
def search ( list_data , key , left = 0 , right = 0 )-> int:
    '''simple docstring'''
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data , key , left + 1 , right - 1 )
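# Illustrative check (added): the recursion narrows [left, right] from both ends,
# so a hit at either boundary returns immediately and a full miss costs about
# len(list_data) / 2 calls.
assert search([5, 1, 15, 50, 0, 10, 40] , 15 ) == 2
assert search([5, 1, 15, 50, 0, 10, 40] , 6 ) == -1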
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 | 0 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
A_ = False, False, False
@dataclass
class lowercase :
"""simple docstring"""
UpperCAmelCase = None
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = None
# Automatically constructed
UpperCAmelCase = "dict"
UpperCAmelCase = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
UpperCAmelCase = field(default="""Audio""" , init=lowercase_ , repr=lowercase_ )
def __call__( self ) -> Tuple:
return self.pa_type
def _snake_case ( self ,a_ ) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
if isinstance(a__ ,a__ ):
return {"bytes": None, "path": value}
elif isinstance(a__ ,a__ ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
_UpperCAmelCase : List[str] = BytesIO()
sf.write(a__ ,value["""array"""] ,value["""sampling_rate"""] ,format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
                    # At least, if you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
if value.get("""bytes""" ):
                    # If we already have the PCM bytes, we don't need to read the file again (just use them!)
_UpperCAmelCase : List[Any] = np.frombuffer(value["""bytes"""] ,dtype=np.intaa ).astype(np.floataa ) / 32_767
else:
_UpperCAmelCase : Optional[Any] = np.memmap(value["""path"""] ,dtype="""h""" ,mode="""r""" ).astype(np.floataa ) / 32_767
_UpperCAmelCase : Dict = BytesIO(bytes() )
sf.write(a__ ,a__ ,value["""sampling_rate"""] ,format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def _snake_case ( self ,a_ ,a_ = None ) -> dict:
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
_UpperCAmelCase ,_UpperCAmelCase : List[Any] = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
_UpperCAmelCase : int = xsplitext(a__ )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
_UpperCAmelCase : str = token_per_repo_id or {}
_UpperCAmelCase : Optional[int] = path.split("""::""" )[-1]
try:
_UpperCAmelCase : Optional[Any] = string_to_dict(a__ ,config.HUB_DATASETS_URL )["""repo_id"""]
_UpperCAmelCase : Tuple = token_per_repo_id[repo_id]
except (ValueError, KeyError):
_UpperCAmelCase : Dict = None
with xopen(a__ ,"""rb""" ,use_auth_token=a__ ) as f:
_UpperCAmelCase ,_UpperCAmelCase : str = sf.read(a__ )
else:
_UpperCAmelCase ,_UpperCAmelCase : str = sf.read(a__ )
_UpperCAmelCase : Any = array.T
if self.mono:
_UpperCAmelCase : List[str] = librosa.to_mono(a__ )
if self.sampling_rate and self.sampling_rate != sampling_rate:
_UpperCAmelCase : List[Any] = librosa.resample(a__ ,orig_sr=a__ ,target_sr=self.sampling_rate )
_UpperCAmelCase : int = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _snake_case ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def _snake_case ( self ,a_ ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
_UpperCAmelCase : str = pa.array([None] * len(a__ ) ,type=pa.binary() )
_UpperCAmelCase : Optional[Any] = pa.StructArray.from_arrays([bytes_array, storage] ,["""bytes""", """path"""] ,mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_UpperCAmelCase : List[Any] = pa.array([None] * len(a__ ) ,type=pa.string() )
_UpperCAmelCase : Union[str, Any] = pa.StructArray.from_arrays([storage, path_array] ,["""bytes""", """path"""] ,mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
_UpperCAmelCase : Dict = pa.array([Audio().encode_example(a__ ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
_UpperCAmelCase : List[str] = storage.field("""bytes""" )
else:
_UpperCAmelCase : int = pa.array([None] * len(a__ ) ,type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
_UpperCAmelCase : Tuple = storage.field("""path""" )
else:
_UpperCAmelCase : Optional[int] = pa.array([None] * len(a__ ) ,type=pa.string() )
_UpperCAmelCase : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,["""bytes""", """path"""] ,mask=storage.is_null() )
return array_cast(a__ ,self.pa_type )
def _snake_case ( self ,a_ ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(a_ ):
with xopen(a__ ,"""rb""" ) as f:
_UpperCAmelCase : Tuple = f.read()
return bytes_
_UpperCAmelCase : Optional[int] = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] ,type=pa.binary() ,)
_UpperCAmelCase : Dict = pa.array(
[os.path.basename(a__ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] ,type=pa.string() ,)
_UpperCAmelCase : Optional[int] = pa.StructArray.from_arrays([bytes_array, path_array] ,["""bytes""", """path"""] ,mask=bytes_array.is_null() )
return array_cast(a__ ,self.pa_type )
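# Hedged usage sketch (added; assumes HF `datasets` is installed and that a local
# file "clip.wav" exists, both illustrative assumptions):
# from datasets import Audio, Dataset
# ds = Dataset.from_dict({"audio": ["clip.wav"]}).cast_column("audio", Audio(sampling_rate=16_000))
# ds[0]["audio"]["sampling_rate"]  # -> 16000; decoding and resampling happen on access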
| 354 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video ( url )-> bytes:
    '''simple docstring'''
    base_url = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url="""
    video_url = requests.get(base_url + url ).json()[0]["""urls"""][0]["""src"""]
    return requests.get(video_url ).content
if __name__ == "__main__":
    url = input("""Enter Video/IGTV url: """).strip()
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, """wb""") as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 349 | 0 |
'''simple docstring'''
from __future__ import annotations
def snake_case_ ( electron_conc , hole_conc , intrinsic_conc , )-> tuple[str, float]:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative in a semiconductor""" )
elif hole_conc < 0:
raise ValueError("""Hole concentration cannot be negative in a semiconductor""" )
elif intrinsic_conc < 0:
raise ValueError(
"""Intrinsic concentration cannot be negative in a semiconductor""" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
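# Worked check (added for illustration): by the mass-action law n * p = n_i**2,
# electron_conc = 25 with intrinsic_conc = 200 forces hole_conc = 200**2 / 25 = 1600.
assert snake_case_(electron_conc=25 , hole_conc=0 , intrinsic_conc=200 ) == ("hole_conc", 1_600.0)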
if __name__ == "__main__":
import doctest
doctest.testmod()
| 355 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : List[str] = 1
_UpperCAmelCase : List[str] = 3
_UpperCAmelCase : Union[str, Any] = (32, 32)
_UpperCAmelCase : str = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(a_ )
return image
@property
def _snake_case ( self ) -> List[Any]:
torch.manual_seed(0 )
_UpperCAmelCase : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
return model
@property
def _snake_case ( self ) -> Optional[int]:
torch.manual_seed(0 )
_UpperCAmelCase : str = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
def _snake_case ( self ) -> Dict:
torch.manual_seed(0 )
_UpperCAmelCase : Any = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,)
return CLIPTextModel(a_ )
@property
def _snake_case ( self ) -> Union[str, Any]:
def extract(*a_ ,**a_ ):
class lowercase :
"""simple docstring"""
def __init__( self ) -> Any:
_UpperCAmelCase : str = torch.ones([0] )
def _snake_case ( self ,a_ ) -> Any:
self.pixel_values.to(a_ )
return self
return Out()
return extract
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Union[str, Any] = self.dummy_cond_unet
_UpperCAmelCase : int = DDIMScheduler(
beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=a_ ,set_alpha_to_one=a_ ,)
_UpperCAmelCase : Optional[int] = self.dummy_vae
_UpperCAmelCase : Optional[int] = self.dummy_text_encoder
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        # assemble the pipeline with the DDIM scheduler configured above
_UpperCAmelCase : int = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : Optional[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Union[str, Any] = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : Optional[int] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : str = sd_pipe([prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
_UpperCAmelCase : int = output.images
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : str = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=a_ ,)[0]
_UpperCAmelCase : str = image[0, -3:, -3:, -1]
_UpperCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Tuple = self.dummy_cond_unet
_UpperCAmelCase : Optional[int] = PNDMScheduler(skip_prk_steps=a_ )
_UpperCAmelCase : int = self.dummy_vae
_UpperCAmelCase : int = self.dummy_text_encoder
_UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : str = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : str = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : int = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : Any = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : List[Any] = sd_pipe([prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
_UpperCAmelCase : Dict = output.images
_UpperCAmelCase : List[Any] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : Any = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=a_ ,)[0]
_UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase : Union[str, Any] = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : Optional[int] = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=a_ )
assert isinstance(a_ ,a_ )
assert isinstance(pipe.scheduler ,a_ )
assert pipe.safety_checker is None
_UpperCAmelCase : Dict = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained(a_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_UpperCAmelCase : Union[str, Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def _snake_case ( self ) -> str:
_UpperCAmelCase : Optional[int] = self.dummy_cond_unet
_UpperCAmelCase : str = PNDMScheduler(skip_prk_steps=a_ )
_UpperCAmelCase : List[str] = self.dummy_vae
_UpperCAmelCase : int = self.dummy_text_encoder
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
_UpperCAmelCase : str = unet.half()
_UpperCAmelCase : List[str] = vae.half()
_UpperCAmelCase : Dict = bert.half()
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : Dict = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : List[str] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : str = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : int = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> str:
_UpperCAmelCase : List[str] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=a_ )
_UpperCAmelCase : Dict = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_UpperCAmelCase : int = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : List[Any] = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
_UpperCAmelCase : Any = 4_003_660_346
_UpperCAmelCase : List[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
_UpperCAmelCase : int = torch.manual_seed(a_ )
_UpperCAmelCase : str = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : str = output.images
_UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[str] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with safety guidance (strong configuration)
_UpperCAmelCase : List[str] = torch.manual_seed(a_ )
_UpperCAmelCase : Optional[Any] = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : List[str] = output.images
_UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[str] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
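    # Added note (illustrative): sld_guidance_scale=0 disables Safe Latent Diffusion
    # entirely, while warmup 7 / threshold 0.025 / momentum 0.5 above exercise its
    # strong configuration.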
def _snake_case ( self ) -> int:
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=a_ )
_UpperCAmelCase : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_UpperCAmelCase : Union[str, Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Any = """padme amidala taking a bath artwork, safe for work, no nudity"""
_UpperCAmelCase : Optional[Any] = 2_734_971_755
_UpperCAmelCase : Optional[int] = 7
_UpperCAmelCase : int = torch.manual_seed(a_ )
_UpperCAmelCase : int = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : Optional[int] = output.images
_UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
_UpperCAmelCase : Optional[int] = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
_UpperCAmelCase : Optional[int] = torch.manual_seed(a_ )
_UpperCAmelCase : int = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : Union[str, Any] = output.images
_UpperCAmelCase : Any = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
_UpperCAmelCase : List[str] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Optional[int] = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
_UpperCAmelCase : Dict = 1_044_355_234
_UpperCAmelCase : int = 12
_UpperCAmelCase : Optional[Any] = torch.manual_seed(a_ )
_UpperCAmelCase : List[str] = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : List[str] = output.images
_UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
_UpperCAmelCase : Dict = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
_UpperCAmelCase : Tuple = torch.manual_seed(a_ )
_UpperCAmelCase : Dict = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : Optional[Any] = output.images
_UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
_UpperCAmelCase : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 349 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    """configuration_bigbird_pegasus""": [
        """BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """BigBirdPegasusConfig""",
        """BigBirdPegasusOnnxConfig""",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_bigbird_pegasus"""] = [
        """BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """BigBirdPegasusForCausalLM""",
        """BigBirdPegasusForConditionalGeneration""",
        """BigBirdPegasusForQuestionAnswering""",
        """BigBirdPegasusForSequenceClassification""",
        """BigBirdPegasusModel""",
        """BigBirdPegasusPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
A_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
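# Added note (illustrative): the _LazyModule assignment above is the standard
# transformers lazy-import pattern; names listed in _import_structure are only
# resolved on first attribute access, e.g. (module path assumed):
# import transformers.models.bigbird_pegasus as m
# m.BigBirdPegasusConfig  # the real import happens here, not at package import time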
| 356 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_roberta_prelayernorm"""] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_roberta_prelayernorm"""] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_roberta_prelayernorm"""] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
A_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 349 | 0 |
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset ( dataset_size , input_in_memory_max_size , monkeypatch )-> None:
    '''simple docstring'''
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
assert result == expected
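# Added note (illustrative): the parametrization above boils down to checking
# that is_small_dataset returns True only when 0 < dataset_size < IN_MEMORY_MAX_SIZE.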
| 357 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : Any = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = """yolos"""
def __init__( self ,a_=768 ,a_=12 ,a_=12 ,a_=3_072 ,a_="gelu" ,a_=0.0 ,a_=0.0 ,a_=0.02 ,a_=1E-1_2 ,a_=[512, 864] ,a_=16 ,a_=3 ,a_=True ,a_=100 ,a_=True ,a_=False ,a_=1 ,a_=5 ,a_=2 ,a_=5 ,a_=2 ,a_=0.1 ,**a_ ,) -> List[str]:
super().__init__(**a_ )
_UpperCAmelCase : Optional[Any] = hidden_size
_UpperCAmelCase : Optional[Any] = num_hidden_layers
_UpperCAmelCase : Tuple = num_attention_heads
_UpperCAmelCase : Optional[Any] = intermediate_size
_UpperCAmelCase : Union[str, Any] = hidden_act
_UpperCAmelCase : List[str] = hidden_dropout_prob
_UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
_UpperCAmelCase : List[Any] = initializer_range
_UpperCAmelCase : Union[str, Any] = layer_norm_eps
_UpperCAmelCase : int = image_size
_UpperCAmelCase : Dict = patch_size
_UpperCAmelCase : Tuple = num_channels
_UpperCAmelCase : Optional[Any] = qkv_bias
_UpperCAmelCase : List[Any] = num_detection_tokens
_UpperCAmelCase : Tuple = use_mid_position_embeddings
_UpperCAmelCase : int = auxiliary_loss
# Hungarian matcher
_UpperCAmelCase : Dict = class_cost
_UpperCAmelCase : Dict = bbox_cost
_UpperCAmelCase : Optional[int] = giou_cost
# Loss coefficients
_UpperCAmelCase : int = bbox_loss_coefficient
_UpperCAmelCase : Optional[Any] = giou_loss_coefficient
_UpperCAmelCase : Union[str, Any] = eos_coefficient
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = version.parse("""1.11""" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _snake_case ( self ) -> float:
return 1E-4
@property
def _snake_case ( self ) -> int:
return 12
| 349 | 0 |
'''simple docstring'''
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode :
    """simple docstring"""
    def __init__( self ,start ,end ,val ,left=None ,right=None ) -> None:
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right
    def __repr__( self ) -> str:
        return f'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class SegmentTree :
    """simple docstring"""
    def __init__( self ,collection ,function ) -> None:
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0 ,len(self.collection ) - 1 )
    def update( self ,i ,val ) -> None:
        self._update_tree(self.root ,i ,val )
    def query_range( self ,i ,j ):
        return self._query_range(self.root ,i ,j )
    def _build_tree( self ,start ,end ):
        if start == end:
            return SegmentTreeNode(start ,end ,self.collection[start] )
        mid = (start + end) // 2
        left = self._build_tree(start ,mid )
        right = self._build_tree(mid + 1 ,end )
        return SegmentTreeNode(start ,end ,self.fn(left.val ,right.val ) ,left ,right )
    def _update_tree( self ,node ,i ,val ) -> None:
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left ,i ,val )
        else:
            self._update_tree(node.right ,i ,val )
        node.val = self.fn(node.left.val ,node.right.val )
    def _query_range( self ,node ,i ,j ):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left ,i ,j )
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left ,i ,node.mid ) ,self._query_range(node.right ,node.mid + 1 ,j ) ,)
        else:
            # range in right child tree
            return self._query_range(node.right ,i ,j )
    def traverse( self ):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root )
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left )
                if node.right is not None:
                    queue.put(node.right )
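# Illustrative check (added): update and query each walk one root-to-leaf path,
# so both run in O(log n) for n leaves.
_demo = SegmentTree([2, 1, 5, 3, 4] ,lambda a ,b : a + b )
assert _demo.query_range(0 ,4 ) == 15
_demo.update(2 ,0 )
assert _demo.query_range(0 ,4 ) == 10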
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("""*""" * 5_0)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 358 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowercase ( unittest.TestCase ):
"""simple docstring"""
    def test_sorted( self ) -> None:
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit ,weight ,max_weight ) ,210 )
    def test_negative_max_weight( self ) -> None:
        self.assertRaisesRegex(ValueError ,"""max_weight must greater than zero.""" )
    def test_negative_weight_value( self ) -> None:
        self.assertRaisesRegex(ValueError ,"""Weight can not be negative.""" )
    def test_negative_profit_value( self ) -> None:
        self.assertRaisesRegex(ValueError ,"""Profit can not be negative.""" )
    def test_null_max_weight( self ) -> None:
        self.assertRaisesRegex(ValueError ,"""max_weight must greater than zero.""" )
    def test_unequal_list_length( self ) -> None:
        self.assertRaisesRegex(
            ValueError ,"""The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
| 349 | 0 |
'''simple docstring'''
from __future__ import annotations
def snake_case_ ( n )-> list[int]:
    '''simple docstring'''
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    if n > 1:
        factors.append(n )
    return factors
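# Illustrative check (added): trial division emits each prime factor with its
# multiplicity, in ascending order, e.g. 360 = 2 * 2 * 2 * 3 * 3 * 5.
assert snake_case_(360 ) == [2, 2, 2, 3, 3, 5]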
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359 |
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve ( num )-> list[int]:
    '''simple docstring'''
    if num <= 0:
        message = f'''{num}: Invalid input, please enter a positive integer.'''
        raise ValueError(message )
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num ) )
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start )
            # Set multiples of start to False
            for i in range(start * start , num + 1 , start ):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1 , num + 1 ):
        if sieve[j] is True:
            prime.append(j )
    return prime
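# Illustrative check (added): primes up to 25, in ascending order.
assert prime_sieve(25 ) == [2, 3, 5, 7, 11, 13, 17, 19, 23]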
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
| 349 | 0 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class lowercase ( __a ):
"""simple docstring"""
UpperCAmelCase = ["""input_features"""]
def __init__( self ,a_=80 ,a_=16_000 ,a_=160 ,a_=30 ,a_=400 ,a_=0.0 ,a_=False ,**a_ ,) -> List[Any]:
super().__init__(
feature_size=a__ ,sampling_rate=a__ ,padding_value=a__ ,return_attention_mask=a__ ,**a__ ,)
_UpperCAmelCase : Any = n_fft
_UpperCAmelCase : List[Any] = hop_length
_UpperCAmelCase : List[Any] = chunk_length
_UpperCAmelCase : Optional[int] = chunk_length * sampling_rate
_UpperCAmelCase : Any = self.n_samples // hop_length
_UpperCAmelCase : Union[str, Any] = sampling_rate
_UpperCAmelCase : int = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 ,num_mel_filters=a__ ,min_frequency=0.0 ,max_frequency=8_000.0 ,sampling_rate=a__ ,norm="""slaney""" ,mel_scale="""slaney""" ,)
def _snake_case ( self ,a_ ) -> List[str]:
_UpperCAmelCase : str = spectrogram(
a__ ,window_function(self.n_fft ,"""hann""" ) ,frame_length=self.n_fft ,hop_length=self.hop_length ,power=2.0 ,mel_filters=self.mel_filters ,log_mel="""log10""" ,)
_UpperCAmelCase : Optional[Any] = log_spec[:, :-1]
_UpperCAmelCase : Union[str, Any] = np.maximum(a__ ,log_spec.max() - 8.0 )
_UpperCAmelCase : List[Any] = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _snake_case ( a_ ,a_ ,a_ = 0.0 ) -> Dict:
if attention_mask is not None:
_UpperCAmelCase : Optional[Any] = np.array(a__ ,np.intaa )
_UpperCAmelCase : Optional[Any] = []
for vector, length in zip(a__ ,attention_mask.sum(-1 ) ):
_UpperCAmelCase : str = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
_UpperCAmelCase : str = padding_value
normed_input_values.append(a__ )
else:
_UpperCAmelCase : List[Any] = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self ,a_ ,a_ = True ,a_ = None ,a_ = None ,a_ = None ,a_ = "max_length" ,a_ = None ,a_ = None ,a_ = None ,**a_ ,) -> List[str]:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
_UpperCAmelCase : Optional[Any] = isinstance(a__ ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
_UpperCAmelCase : str = is_batched_numpy or (
isinstance(a__ ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
_UpperCAmelCase : Dict = [np.asarray([speech] ,dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(a__ ,np.ndarray ):
_UpperCAmelCase : List[str] = np.asarray(a__ ,dtype=np.floataa )
elif isinstance(a__ ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_UpperCAmelCase : int = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_UpperCAmelCase : Any = [np.asarray([raw_speech] ).T]
_UpperCAmelCase : Any = BatchFeature({"""input_features""": raw_speech} )
# convert into correct format for padding
_UpperCAmelCase : Any = self.pad(
a__ ,padding=a__ ,max_length=max_length if max_length else self.n_samples ,truncation=a__ ,pad_to_multiple_of=a__ ,return_attention_mask=return_attention_mask or do_normalize ,)
# zero-mean and unit-variance normalization
if do_normalize:
_UpperCAmelCase : int = self.zero_mean_unit_var_norm(
padded_inputs["""input_features"""] ,attention_mask=padded_inputs["""attention_mask"""] ,padding_value=self.padding_value ,)
_UpperCAmelCase : Any = np.stack(padded_inputs["""input_features"""] ,axis=0 )
# make sure list is in array format
_UpperCAmelCase : str = padded_inputs.get("""input_features""" ).transpose(2 ,0 ,1 )
_UpperCAmelCase : int = [self._np_extract_fbank_features(a__ ) for waveform in input_features[0]]
if isinstance(input_features[0] ,a__ ):
_UpperCAmelCase : List[str] = [np.asarray(a__ ,dtype=np.floataa ) for feature in input_features]
else:
_UpperCAmelCase : Optional[Any] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
_UpperCAmelCase : str = padded_inputs["""attention_mask"""][:, :: self.hop_length]
if return_tensors is not None:
_UpperCAmelCase : Any = padded_inputs.convert_to_tensors(a__ )
return padded_inputs
def _snake_case ( self ) -> Tuple:
_UpperCAmelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
_UpperCAmelCase : Tuple = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
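# Hedged usage sketch (added): the class above mirrors the upstream
# transformers.WhisperFeatureExtractor; with its defaults (80 mels, 16 kHz,
# hop 160), 30 s of audio maps to a (batch, 80, 3000) log-mel tensor:
# import numpy as np
# from transformers import WhisperFeatureExtractor
# fe = WhisperFeatureExtractor()
# out = fe(np.zeros(480_000, dtype=np.float32), sampling_rate=16_000, return_tensors="np")
# out["input_features"].shape  # -> (1, 80, 3000)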
| 360 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self ,a_ ,a_ = None ,a_ = None ,a_ = True ,a_ = None ,a_ = False ,a_ = None ,a_ = True ,a_ = "arrow" ,**a_ ,) -> str:
super().__init__(
split=a_ ,features=a_ ,cache_dir=a_ ,keep_in_memory=a_ ,streaming=a_ ,**a_ ,)
_UpperCAmelCase : Any = load_from_cache_file
_UpperCAmelCase : Optional[int] = file_format
_UpperCAmelCase : int = Spark(
df=a_ ,features=a_ ,cache_dir=a_ ,working_dir=a_ ,**a_ ,)
def _snake_case ( self ) -> int:
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
_UpperCAmelCase : str = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=a_ ,file_format=self._file_format ,)
return self.builder.as_dataset(split=self.split )
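# Hedged usage sketch (added; assumes a live SparkSession named `spark` and that
# the installed `datasets` exposes Dataset.from_spark, the public wrapper around
# the reader above):
# from datasets import Dataset
# df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
# ds = Dataset.from_spark(df)
# ds[0]["text"]  # -> "hello"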
| 349 | 0 |
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch ( tf_checkpoint_path , config_path , pytorch_dump_path )-> None:
    '''simple docstring'''
    # TF stores Dense kernels as (in_features, out_features); torch.nn.Linear.weight
    # is (out_features, in_features), hence the transpose on every "kernel" array.
    def get_masked_lm_array(name ):
        full_name = f'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    def get_encoder_array(name ):
        full_name = f'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    def get_encoder_layer_array(layer_index , name ):
        full_name = f'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    def get_encoder_attention_layer_array(layer_index , name , original_shape ):
        full_name = f'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        array = array.reshape(original_shape )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    print(F'''Loading model based on config from {config_path}...''' )
    config = BertConfig.from_json_file(config_path )
    model = BertForMaskedLM(config )
    # Layers
    for layer_index in range(0 , config.num_hidden_layers ):
        layer: BertLayer = model.bert.encoder.layer[layer_index]
        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index , """_query_dense/kernel""" , self_attn.query.weight.data.shape )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index , """_query_dense/bias""" , self_attn.query.bias.data.shape )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index , """_key_dense/kernel""" , self_attn.key.weight.data.shape )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index , """_key_dense/bias""" , self_attn.key.bias.data.shape )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index , """_value_dense/kernel""" , self_attn.value.weight.data.shape )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index , """_value_dense/bias""" , self_attn.value.bias.data.shape )
        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index , """_output_dense/kernel""" , self_output.dense.weight.data.shape )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index , """_output_dense/bias""" , self_output.dense.bias.data.shape )
        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index , """_attention_layer_norm/gamma""" )
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index , """_attention_layer_norm/beta""" )
        # Intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.weight.data = get_encoder_layer_array(layer_index , """_intermediate_dense/kernel""" )
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index , """_intermediate_dense/bias""" )
        # Output
        bert_output: BertOutput = layer.output
        bert_output.dense.weight.data = get_encoder_layer_array(layer_index , """_output_dense/kernel""" )
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index , """_output_dense/bias""" )
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index , """_output_layer_norm/gamma""" )
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index , """_output_layer_norm/beta""" )
    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("""_position_embedding_layer/embeddings""" )
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("""_type_embedding_layer/embeddings""" )
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("""_embedding_norm_layer/gamma""" )
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("""_embedding_norm_layer/beta""" )
    # LM Head
    lm_head = model.cls.predictions.transform
    lm_head.dense.weight.data = get_masked_lm_array("""dense/kernel""" )
    lm_head.dense.bias.data = get_masked_lm_array("""dense/bias""" )
    lm_head.LayerNorm.weight.data = get_masked_lm_array("""layer_norm/gamma""" )
    lm_head.LayerNorm.bias.data = get_masked_lm_array("""layer_norm/beta""" )
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("""embedding_table""" )
    # Pooling
    model.bert.pooler = BertPooler(config=config )
    model.bert.pooler.dense.weight.data = get_encoder_array("""_pooler_layer/kernel""" )
    model.bert.pooler.dense.bias.data = get_encoder_array("""_pooler_layer/bias""" )
    # Export final model
    model.save_pretrained(pytorch_dump_path )
    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path )
    print(new_model.eval() )
    print("""Model conversion was done successfully!""" )
if __name__ == "__main__":
A_ : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
A_ : Optional[int] = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 361 |
'''simple docstring'''
__version__ = """0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 349 | 0 |
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowercase ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = MODEL_FOR_MASKED_LM_MAPPING
UpperCAmelCase = TF_MODEL_FOR_MASKED_LM_MAPPING
def _snake_case ( self ) -> Any:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _snake_case ( self ) -> int:
_UpperCAmelCase : Dict = pipeline(task="""fill-mask""" ,model="""sshleifer/tiny-distilroberta-base""" ,top_k=2 ,framework="""tf""" )
_UpperCAmelCase : int = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(__a ,decimals=6 ) ,[
{"""sequence""": """My name is grouped""", """score""": 2.1E-0_5, """token""": 38_015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1E-0_5, """token""": 25_506, """token_str""": """ accuser"""},
] ,)
_UpperCAmelCase : List[str] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__a ,decimals=6 ) ,[
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1E-0_5,
"""token""": 38_015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1E-0_5,
"""token""": 25_506,
"""token_str""": """ accuser""",
},
] ,)
_UpperCAmelCase : Optional[int] = unmasker("""My name is <mask>""" ,targets=[""" Patrick""", """ Clara""", """ Teven"""] ,top_k=3 )
self.assertEqual(
nested_simplify(__a ,decimals=6 ) ,[
{"""sequence""": """My name is Clara""", """score""": 2E-0_5, """token""": 13_606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2E-0_5, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9E-0_5, """token""": 2_941, """token_str""": """ Te"""},
] ,)
@require_torch
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : List[Any] = pipeline(task="""fill-mask""" ,model="""sshleifer/tiny-distilroberta-base""" ,top_k=2 ,framework="""pt""" )
_UpperCAmelCase : Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(__a ,decimals=6 ) ,[
{"""sequence""": """My name is Maul""", """score""": 2.2E-0_5, """token""": 35_676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2E-0_5, """token""": 16_416, """token_str""": """ELS"""},
] ,)
_UpperCAmelCase : Optional[int] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__a ,decimals=6 ) ,[
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2E-0_5,
"""token""": 35_676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2E-0_5, """token""": 16_416, """token_str""": """ELS"""},
] ,)
_UpperCAmelCase : Any = unmasker("""My name is <mask>""" ,targets=[""" Patrick""", """ Clara""", """ Teven"""] ,top_k=3 )
self.assertEqual(
nested_simplify(__a ,decimals=6 ) ,[
{"""sequence""": """My name is Patrick""", """score""": 2.1E-0_5, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2E-0_5, """token""": 2_941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2E-0_5, """token""": 13_606, """token_str""": """ Clara"""},
] ,)
_UpperCAmelCase : Tuple = unmasker("""My name is <mask> <mask>""" ,top_k=2 )
self.assertEqual(
nested_simplify(__a ,decimals=6 ) ,[
[
{
"""score""": 2.2E-0_5,
"""token""": 35_676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2E-0_5, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2E-0_5,
"""token""": 35_676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2E-0_5, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] ,)
@require_torch_gpu
def _snake_case ( self ) -> int:
_UpperCAmelCase : str = pipeline("""fill-mask""" ,model="""hf-internal-testing/tiny-random-distilbert""" ,device=0 ,framework="""pt""" )
# convert model to fp16
pipe.model.half()
_UpperCAmelCase : List[Any] = pipe("""Paris is the [MASK] of France.""" )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(__a ,__a )
@slow
@require_torch
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : Optional[Any] = pipeline(task="""fill-mask""" ,model="""distilroberta-base""" ,top_k=2 ,framework="""pt""" )
self.run_large_test(__a )
@slow
@require_tf
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : List[Any] = pipeline(task="""fill-mask""" ,model="""distilroberta-base""" ,top_k=2 ,framework="""tf""" )
self.run_large_test(__a )
    def run_large_test( self ,unmasker ) -> Optional[int]:
_UpperCAmelCase : Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(__a ) ,[
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1_573, """token_str""": """ Chris"""},
] ,)
_UpperCAmelCase : List[Any] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__a ) ,[
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2_201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 12_790,
"""token_str""": """ Lyon""",
},
] ,)
_UpperCAmelCase : List[Any] = unmasker("""My name is <mask>""" ,targets=[""" Patrick""", """ Clara""", """ Teven"""] ,top_k=3 )
self.assertEqual(
nested_simplify(__a ) ,[
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 13_606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2_941, """token_str""": """ Te"""},
] ,)
@require_torch
def _snake_case ( self ) -> str:
_UpperCAmelCase : Union[str, Any] = pipeline(task="""fill-mask""" ,model="""sshleifer/tiny-distilroberta-base""" ,framework="""pt""" )
_UpperCAmelCase : Optional[Any] = None
_UpperCAmelCase : Dict = None
self.run_pipeline_test(__a ,[] )
@require_tf
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : Any = pipeline(task="""fill-mask""" ,model="""sshleifer/tiny-distilroberta-base""" ,framework="""tf""" )
_UpperCAmelCase : Dict = None
_UpperCAmelCase : str = None
self.run_pipeline_test(__a ,[] )
    def get_test_pipeline( self ,model ,tokenizer ,processor ) -> Tuple:
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
        fill_masker = FillMaskPipeline(model=model ,tokenizer=tokenizer )
        examples = [
            f'''This is another {tokenizer.mask_token} test''',
        ]
        return fill_masker, examples
    def run_pipeline_test( self ,fill_masker ,examples ) -> str:
_UpperCAmelCase : Union[str, Any] = fill_masker.tokenizer
_UpperCAmelCase : Tuple = fill_masker.model
_UpperCAmelCase : Dict = fill_masker(
f'''This is a {tokenizer.mask_token}''' ,)
self.assertEqual(
__a ,[
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
] ,)
_UpperCAmelCase : Tuple = fill_masker([f'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
__a ,[
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
] ,)
_UpperCAmelCase : Any = fill_masker([f'''This is a {tokenizer.mask_token}''', f'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
__a ,[
[
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
],
[
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
],
] ,)
with self.assertRaises(__a ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(__a ):
fill_masker("""This is""" )
self.run_test_top_k(__a ,__a )
self.run_test_targets(__a ,__a )
self.run_test_top_k_targets(__a ,__a )
self.fill_mask_with_duplicate_targets_and_top_k(__a ,__a )
self.fill_mask_with_multiple_masks(__a ,__a )
    def run_test_targets( self ,model ,tokenizer ) -> List[Any]:
_UpperCAmelCase : List[Any] = tokenizer.get_vocab()
_UpperCAmelCase : Optional[Any] = sorted(vocab.keys() )[:2]
# Pipeline argument
_UpperCAmelCase : int = FillMaskPipeline(model=__a ,tokenizer=__a ,targets=__a )
_UpperCAmelCase : Union[str, Any] = fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
__a ,[
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
] ,)
_UpperCAmelCase : Optional[Any] = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} ,__a )
_UpperCAmelCase : str = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} ,set(__a ) )
# Call argument
_UpperCAmelCase : Dict = FillMaskPipeline(model=__a ,tokenizer=__a )
_UpperCAmelCase : str = fill_masker(f'''This is a {tokenizer.mask_token}''' ,targets=__a )
self.assertEqual(
__a ,[
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
] ,)
_UpperCAmelCase : Tuple = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} ,__a )
_UpperCAmelCase : int = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} ,set(__a ) )
# Score equivalence
_UpperCAmelCase : List[Any] = fill_masker(f'''This is a {tokenizer.mask_token}''' ,targets=__a )
_UpperCAmelCase : int = [top_mask["""token_str"""] for top_mask in outputs]
_UpperCAmelCase : Optional[int] = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__a ) == set(__a ):
_UpperCAmelCase : Tuple = fill_masker(f'''This is a {tokenizer.mask_token}''' ,targets=__a )
_UpperCAmelCase : List[Any] = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(__a ) ,nested_simplify(__a ) )
# Raises with invalid
with self.assertRaises(__a ):
_UpperCAmelCase : List[str] = fill_masker(f'''This is a {tokenizer.mask_token}''' ,targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(__a ):
_UpperCAmelCase : Dict = fill_masker(f'''This is a {tokenizer.mask_token}''' ,targets=[""""""] )
with self.assertRaises(__a ):
_UpperCAmelCase : Union[str, Any] = fill_masker(f'''This is a {tokenizer.mask_token}''' ,targets="""""" )
    def run_test_top_k( self ,model ,tokenizer ) -> List[Any]:
_UpperCAmelCase : Union[str, Any] = FillMaskPipeline(model=__a ,tokenizer=__a ,top_k=2 )
_UpperCAmelCase : Tuple = fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
__a ,[
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
] ,)
_UpperCAmelCase : Tuple = FillMaskPipeline(model=__a ,tokenizer=__a )
_UpperCAmelCase : Dict = fill_masker(f'''This is a {tokenizer.mask_token}''' ,top_k=2 )
self.assertEqual(
__a ,[
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
] ,)
self.assertEqual(nested_simplify(__a ) ,nested_simplify(__a ) )
    def run_test_top_k_targets( self ,model ,tokenizer ) -> Any:
_UpperCAmelCase : Union[str, Any] = tokenizer.get_vocab()
_UpperCAmelCase : Union[str, Any] = FillMaskPipeline(model=__a ,tokenizer=__a )
# top_k=2, ntargets=3
_UpperCAmelCase : int = sorted(vocab.keys() )[:3]
_UpperCAmelCase : Optional[int] = fill_masker(f'''This is a {tokenizer.mask_token}''' ,top_k=2 ,targets=__a )
# If we use the most probably targets, and filter differently, we should still
# have the same results
        _UpperCAmelCase : int = [el["""token_str"""] for el in sorted(__a ,key=lambda x : x["score"] ,reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__a ).issubset(__a ):
_UpperCAmelCase : Union[str, Any] = fill_masker(f'''This is a {tokenizer.mask_token}''' ,top_k=3 ,targets=__a )
# They should yield exactly the same result
self.assertEqual(nested_simplify(__a ) ,nested_simplify(__a ) )
    def fill_mask_with_duplicate_targets_and_top_k( self ,model ,tokenizer ) -> Dict:
_UpperCAmelCase : Dict = FillMaskPipeline(model=__a ,tokenizer=__a )
_UpperCAmelCase : int = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCAmelCase : Any = sorted(vocab.keys() )[:3]
_UpperCAmelCase : Dict = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCAmelCase : str = fill_masker(f'''My name is {tokenizer.mask_token}''' ,targets=__a ,top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(__a ) ,3 )
    def fill_mask_with_multiple_masks( self ,model ,tokenizer ) -> Any:
_UpperCAmelCase : str = FillMaskPipeline(model=__a ,tokenizer=__a )
_UpperCAmelCase : str = fill_masker(
f'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' ,top_k=2 )
self.assertEqual(
__a ,[
[
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
],
[
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
],
[
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
{"""sequence""": ANY(__a ), """score""": ANY(__a ), """token""": ANY(__a ), """token_str""": ANY(__a )},
],
] ,)
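# Hypothetical local invocation for this suite (the repository path is an
# assumption; adjust it to wherever the module lives in the checkout):
#   python -m pytest tests/pipelines/test_pipelines_fill_mask.py -x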
| 362 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main() -> None:
    """Entry point for the ``diffusers-cli`` tool."""
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
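# Hypothetical shell usage once `diffusers` is installed (the `diffusers-cli`
# console entry point maps onto main() above):
#   diffusers-cli env    # runs EnvironmentCommand and prints platform/library versions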
| 349 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowercase ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = MobileBertTokenizer
UpperCAmelCase = MobileBertTokenizerFast
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = filter_non_english
UpperCAmelCase = """google/mobilebert-uncased"""
def _snake_case ( self ) -> Optional[int]:
super().setUp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        self.tokenizers_list = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
    def _snake_case ( self ,a_ ) -> Dict:
        input_text = """UNwant\u00E9d,running"""
        output_text = """unwanted, running"""
        return input_text, output_text
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : List[str] = self.tokenizer_class(self.vocab_file )
_UpperCAmelCase : Optional[int] = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(UpperCamelCase__ ,["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) ,[9, 6, 7, 12, 10, 11] )
def _snake_case ( self ) -> Dict:
if not self.test_rust_tokenizer:
return
_UpperCAmelCase : Optional[int] = self.get_tokenizer()
_UpperCAmelCase : Any = self.get_rust_tokenizer()
_UpperCAmelCase : List[Any] = """UNwant\u00E9d,running"""
_UpperCAmelCase : List[str] = tokenizer.tokenize(UpperCamelCase__ )
_UpperCAmelCase : Tuple = rust_tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ ,UpperCamelCase__ )
_UpperCAmelCase : Union[str, Any] = tokenizer.encode(UpperCamelCase__ ,add_special_tokens=UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = rust_tokenizer.encode(UpperCamelCase__ ,add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ ,UpperCamelCase__ )
_UpperCAmelCase : int = self.get_rust_tokenizer()
_UpperCAmelCase : Optional[Any] = tokenizer.encode(UpperCamelCase__ )
_UpperCAmelCase : Tuple = rust_tokenizer.encode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ ,UpperCamelCase__ )
# With lower casing
_UpperCAmelCase : Dict = self.get_tokenizer(do_lower_case=UpperCamelCase__ )
_UpperCAmelCase : str = self.get_rust_tokenizer(do_lower_case=UpperCamelCase__ )
_UpperCAmelCase : List[str] = """UNwant\u00E9d,running"""
_UpperCAmelCase : str = tokenizer.tokenize(UpperCamelCase__ )
_UpperCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ ,UpperCamelCase__ )
_UpperCAmelCase : Dict = tokenizer.encode(UpperCamelCase__ ,add_special_tokens=UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = rust_tokenizer.encode(UpperCamelCase__ ,add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ ,UpperCamelCase__ )
_UpperCAmelCase : List[Any] = self.get_rust_tokenizer()
_UpperCAmelCase : Optional[int] = tokenizer.encode(UpperCamelCase__ )
_UpperCAmelCase : Dict = rust_tokenizer.encode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ ,UpperCamelCase__ )
def _snake_case ( self ) -> str:
_UpperCAmelCase : Union[str, Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) ,["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def _snake_case ( self ) -> str:
_UpperCAmelCase : List[Any] = BasicTokenizer(do_lower_case=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : Tuple = BasicTokenizer(do_lower_case=UpperCamelCase__ ,strip_accents=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""h\u00E9llo"""] )
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : Optional[int] = BasicTokenizer(do_lower_case=UpperCamelCase__ ,strip_accents=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : int = BasicTokenizer(do_lower_case=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : str = BasicTokenizer(do_lower_case=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : Optional[int] = BasicTokenizer(do_lower_case=UpperCamelCase__ ,strip_accents=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : str = BasicTokenizer(do_lower_case=UpperCamelCase__ ,strip_accents=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def _snake_case ( self ) -> str:
_UpperCAmelCase : str = BasicTokenizer(do_lower_case=UpperCamelCase__ ,never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : int = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
_UpperCAmelCase : Optional[Any] = {}
for i, token in enumerate(UpperCamelCase__ ):
_UpperCAmelCase : List[str] = i
_UpperCAmelCase : Tuple = WordpieceTokenizer(vocab=UpperCamelCase__ ,unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) ,[] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) ,["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) ,["""[UNK]""", """runn""", """##ing"""] )
def _snake_case ( self ) -> Union[str, Any]:
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def _snake_case ( self ) -> List[str]:
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def _snake_case ( self ) -> Any:
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : Tuple = self.get_tokenizer()
_UpperCAmelCase : List[Any] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["""Test""", """\xad""", """test"""]] ,[["""[UNK]"""], [], ["""[UNK]"""]] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ["""Test""", """\xad""", """test"""]] ,[["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def _snake_case ( self ) -> str:
_UpperCAmelCase : List[str] = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" )
_UpperCAmelCase : Any = tokenizer.encode("""sequence builders""" ,add_special_tokens=UpperCamelCase__ )
_UpperCAmelCase : List[Any] = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=UpperCamelCase__ )
_UpperCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
_UpperCAmelCase : Dict = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ ,UpperCamelCase__ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def _snake_case ( self ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ ,**UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
_UpperCAmelCase : Optional[Any] = tokenizer_r.encode_plus(
UpperCamelCase__ ,return_attention_mask=UpperCamelCase__ ,return_token_type_ids=UpperCamelCase__ ,return_offsets_mapping=UpperCamelCase__ ,add_special_tokens=UpperCamelCase__ ,)
_UpperCAmelCase : List[Any] = tokenizer_r.do_lower_case if hasattr(UpperCamelCase__ ,"""do_lower_case""" ) else False
_UpperCAmelCase : Optional[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens["""offset_mapping"""] )
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Optional[int] = ["""的""", """人""", """有"""]
_UpperCAmelCase : List[str] = """""".join(UpperCamelCase__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCAmelCase : str = True
_UpperCAmelCase : Any = self.tokenizer_class.from_pretrained(UpperCamelCase__ ,**UpperCamelCase__ )
_UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ ,**UpperCamelCase__ )
_UpperCAmelCase : int = tokenizer_p.encode(UpperCamelCase__ ,add_special_tokens=UpperCamelCase__ )
_UpperCAmelCase : Tuple = tokenizer_r.encode(UpperCamelCase__ ,add_special_tokens=UpperCamelCase__ )
_UpperCAmelCase : List[str] = tokenizer_r.convert_ids_to_tokens(UpperCamelCase__ )
_UpperCAmelCase : int = tokenizer_p.convert_ids_to_tokens(UpperCamelCase__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCamelCase__ ,UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ ,UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : int = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ ,**UpperCamelCase__ )
_UpperCAmelCase : List[str] = self.tokenizer_class.from_pretrained(UpperCamelCase__ ,**UpperCamelCase__ )
_UpperCAmelCase : Dict = tokenizer_r.encode(UpperCamelCase__ ,add_special_tokens=UpperCamelCase__ )
_UpperCAmelCase : Any = tokenizer_p.encode(UpperCamelCase__ ,add_special_tokens=UpperCamelCase__ )
_UpperCAmelCase : List[str] = tokenizer_r.convert_ids_to_tokens(UpperCamelCase__ )
_UpperCAmelCase : Dict = tokenizer_p.convert_ids_to_tokens(UpperCamelCase__ )
# it is expected that only the first Chinese character is not preceded by "##".
_UpperCAmelCase : Dict = [
f'''##{token}''' if idx != 0 else token for idx, token in enumerate(UpperCamelCase__ )
]
self.assertListEqual(UpperCamelCase__ ,UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ ,UpperCamelCase__ )
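# The flag toggled above appears to be `tokenize_chinese_chars` (an inference
# from the assertions): when enabled, each character of "的人有" becomes its own
# token with no "##" prefix; when disabled, the WordPiece continuation prefix
# shows up, e.g. ["的", "##人", "##有"].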
| 363 |
'''simple docstring'''
import math
def jump_search(arr: list, x: int) -> int:
    """Search ``x`` in the sorted list ``arr`` in O(sqrt(n)); return its index, or -1 if absent."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
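# Non-interactive sanity checks (hypothetical data, mirroring the prompt above):
#   jump_search([0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144], 55)  # -> 10
#   jump_search([0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144], 4)   # -> -1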
| 349 | 0 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger("transformers-cli/serving")
def serve_command_factory(args: Namespace):
    """Factory function used to instantiate the serving server from the provided command line arguments."""
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Expose the model information."""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model."""

    output: Any
"""simple docstring"""
@staticmethod
def _snake_case ( a_ ) -> List[Any]:
_UpperCAmelCase : int = parser.add_parser(
"""serve""" ,help="""CLI tool to run inference requests through REST and GraphQL endpoints.""" )
serve_parser.add_argument(
"""--task""" ,type=_A ,choices=get_supported_tasks() ,help="""The task to run the pipeline on""" ,)
serve_parser.add_argument("""--host""" ,type=_A ,default="""localhost""" ,help="""Interface the server will listen on.""" )
serve_parser.add_argument("""--port""" ,type=_A ,default=8_888 ,help="""Port the serving will listen to.""" )
serve_parser.add_argument("""--workers""" ,type=_A ,default=1 ,help="""Number of http workers""" )
serve_parser.add_argument("""--model""" ,type=_A ,help="""Model's name or path to stored model.""" )
serve_parser.add_argument("""--config""" ,type=_A ,help="""Model's config name or path to stored model.""" )
serve_parser.add_argument("""--tokenizer""" ,type=_A ,help="""Tokenizer name to use.""" )
serve_parser.add_argument(
"""--device""" ,type=_A ,default=-1 ,help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" ,)
serve_parser.set_defaults(func=_A )
    def __init__(self, pipeline, host, port, workers):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                "Please install transformers with [serving]: pip install \"transformers[serving]\"."
                "Or install FastAPI and uvicorn separately.")
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/", self.model_info, response_model=ServeModelInfoResult,
                        response_class=JSONResponse, methods=["GET"],),
                    APIRoute(
                        "/tokenize", self.tokenize, response_model=ServeTokenizeResult,
                        response_class=JSONResponse, methods=["POST"],),
                    APIRoute(
                        "/detokenize", self.detokenize, response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse, methods=["POST"],),
                    APIRoute(
                        "/forward", self.forward, response_model=ServeForwardResult,
                        response_class=JSONResponse, methods=["POST"],),
                ],
                timeout=600,)
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """Tokenize the provided input, optionally mapping the tokens to their ids."""
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """Detokenize the provided token ids back into plain text."""
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
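# Hypothetical requests against a running server (host/port follow the CLI
# defaults above; payload shapes follow the Body(..., embed=True) parameters):
#   curl http://localhost:8888/
#   curl -X POST http://localhost:8888/tokenize \
#        -H "Content-Type: application/json" \
#        -d '{"text_input": "Hello world", "return_ids": true}'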
| 364 |
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    """Parse the input file into a dict mapping each node to its [neighbour, distance] pairs."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])

    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour tour to use as the starting solution."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """All tours reachable by swapping two interior nodes; each entry ends with its total distance."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Run tabu search for `iters` iterations with a tabu list holding at most `size` moves."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)

    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
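# The parser above implies a plain-text edge list, one "node node distance"
# triple per line (an inference from the line.split() indexing), e.g.:
#   a b 20
#   a c 18
#   b c 10
# Hypothetical run: python tabu_search.py -f edges.txt -i 100 -s 5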
| 349 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"""configuration_deit""": ["""DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DeiTConfig""", """DeiTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_deit"""] = ["""DeiTFeatureExtractor"""]
    _import_structure["""image_processing_deit"""] = ["""DeiTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_deit"""] = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_deit"""] = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
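# Net effect of the lazy-module pattern (illustrative, not part of this file):
# importing a symbol only triggers the heavy framework import on first access, e.g.
#   from transformers import DeiTConfig   # cheap, no torch/tf import yet
#   from transformers import DeiTModel    # pulls in torch when accessed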
| 365 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list for 0-1 BFS shortest paths."""

    def __init__(self, size: int) -> None:
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-weight edges go to the front of the deque, 1-weight edges to the back.
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
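if __name__ == "__main__":
    # Added smoke test (hypothetical graph, not part of the original module):
    # two routes from 0 to 2, both of total weight 1.
    g = AdjacencyList(4)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 1)
    g.add_edge(0, 3, 1)
    g.add_edge(3, 2, 0)
    assert g.get_shortest_path(0, 2) == 1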
| 349 | 0 |
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Primality test by trial division over numbers of the form 6k +/- 1."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield primes in ascending order, indefinitely."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2000000) -> int:
    """Project Euler style: sum of all primes below ``n``."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
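# takewhile() consumes the infinite prime_generator() lazily, so summation
# stops at the first prime >= n. Spot check (hypothetical small input):
#   solution(10)  # -> 17, i.e. 2 + 3 + 5 + 7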
| 366 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    """Build the train/validation/test dataloaders for one cross-validation fold."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        })

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],)

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)

    return train_dataloader, eval_dataloader, test_dataloader
return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
# Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator, datasets, train_idxs, valid_idxs,)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,)

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
# Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
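# Hypothetical launch commands (mirroring the other accelerate examples):
#   python cross_validation.py --num_folds 3
#   accelerate launch cross_validation.py --num_folds 3 --mixed_precision fp16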
| 349 | 0 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
TEST_FLUSH = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
TEST_STRAIGHT = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 1_4]),
("""2H 5D 3C AS 5S""", False, [1_4, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [1_4, 1_3, 1_2, 1_1, 1_0]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
A_ : Tuple = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
A_ : int = (
("""JH AH TH KH QH""", 2_3),
("""JH 9H TH KH QH""", 2_2),
("""JC KH JS JD JH""", 2_1),
("""KH KC 3S 3H 3D""", 2_0),
("""8C 9C 5C 3C TC""", 1_9),
("""JS QS 9H TS KH""", 1_8),
("""7C 7S KH 2H 7H""", 1_7),
("""3C KH 5D 5S KH""", 1_6),
("""QH 8H KD JH 8S""", 1_5),
("""2D 6D 9D TH 7D""", 1_4),
)
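# Build one random (hand, other, expected) triple: draw two indices into
# SORTED_HANDS and derive the expected outcome from their relative order.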
def snake_case_ ( )-> Any:
'''simple docstring'''
    _UpperCAmelCase ,_UpperCAmelCase : Optional[Any] = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
_UpperCAmelCase : List[str] = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)]
    _UpperCAmelCase ,_UpperCAmelCase : Any = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def snake_case_ ( lowerCAmelCase_ = 100 )-> List[str]:
'''simple docstring'''
return (generate_random_hand() for _ in range(lowerCAmelCase_ ))
@pytest.mark.parametrize("""hand, expected""" , lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[int]:
'''simple docstring'''
assert PokerHand(lowerCAmelCase_ )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> str:
'''simple docstring'''
assert PokerHand(lowerCAmelCase_ )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = PokerHand(lowerCAmelCase_ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> Dict:
'''simple docstring'''
assert PokerHand(lowerCAmelCase_ )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[int]:
'''simple docstring'''
assert PokerHand(lowerCAmelCase_ )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[int]:
'''simple docstring'''
assert PokerHand(lowerCAmelCase_ ).compare_with(PokerHand(lowerCAmelCase_ ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Dict:
'''simple docstring'''
assert PokerHand(lowerCAmelCase_ ).compare_with(PokerHand(lowerCAmelCase_ ) ) == expected
def snake_case_ ( )-> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = [PokerHand(lowerCAmelCase_ ) for hand in SORTED_HANDS]
_UpperCAmelCase : Tuple = poker_hands.copy()
shuffle(lowerCAmelCase_ )
_UpperCAmelCase : int = chain(sorted(lowerCAmelCase_ ) )
for index, hand in enumerate(lowerCAmelCase_ ):
assert hand == poker_hands[index]
def snake_case_ ( )-> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Any = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
pokerhands.sort(reverse=lowerCAmelCase_ )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def snake_case_ ( )-> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = PokerHand("""2C 4S AS 3D 5C""" )
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : List[str] = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def snake_case_ ( )-> List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = 0
_UpperCAmelCase : Optional[Any] = os.path.abspath(os.path.dirname(lowerCAmelCase_ ) )
_UpperCAmelCase : Tuple = os.path.join(lowerCAmelCase_ , """poker_hands.txt""" )
with open(lowerCAmelCase_ ) as file_hand:
for line in file_hand:
_UpperCAmelCase : Union[str, Any] = line[:14].strip()
_UpperCAmelCase : Union[str, Any] = line[15:].strip()
            _UpperCAmelCase ,_UpperCAmelCase : List[Any] = PokerHand(lowerCAmelCase_ ), PokerHand(lowerCAmelCase_ )
_UpperCAmelCase : List[str] = player.compare_with(lowerCAmelCase_ )
if output == "Win":
answer += 1
assert answer == 376
| 367 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
A_ : Dict = logging.getLogger(__name__)
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = """sequence-classification"""
def __init__( self ,a_ ) -> Dict:
if type(a_ ) == dict:
_UpperCAmelCase : Tuple = Namespace(**a_ )
_UpperCAmelCase : Optional[int] = glue_output_modes[hparams.task]
_UpperCAmelCase : Union[str, Any] = glue_tasks_num_labels[hparams.task]
super().__init__(a_ ,a_ ,self.mode )
def _snake_case ( self ,**a_ ) -> Optional[Any]:
return self.model(**a_ )
def _snake_case ( self ,a_ ,a_ ) -> Optional[Any]:
_UpperCAmelCase : Optional[Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_UpperCAmelCase : Any = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
_UpperCAmelCase : Any = self(**a_ )
_UpperCAmelCase : int = outputs[0]
_UpperCAmelCase : Any = self.trainer.lr_schedulers[0]["""scheduler"""]
_UpperCAmelCase : Any = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _snake_case ( self ) -> int:
_UpperCAmelCase : Optional[int] = self.hparams
_UpperCAmelCase : int = processors[args.task]()
_UpperCAmelCase : str = processor.get_labels()
for mode in ["train", "dev"]:
_UpperCAmelCase : Tuple = self._feature_file(a_ )
if os.path.exists(a_ ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" ,a_ )
else:
logger.info("""Creating features from dataset file at %s""" ,args.data_dir )
_UpperCAmelCase : List[Any] = (
processor.get_dev_examples(args.data_dir )
if mode == """dev"""
else processor.get_train_examples(args.data_dir )
)
_UpperCAmelCase : Union[str, Any] = convert_examples_to_features(
a_ ,self.tokenizer ,max_length=args.max_seq_length ,label_list=self.labels ,output_mode=args.glue_output_mode ,)
logger.info("""Saving features into cached file %s""" ,a_ )
torch.save(a_ ,a_ )
def _snake_case ( self ,a_ ,a_ ,a_ = False ) -> DataLoader:
_UpperCAmelCase : Union[str, Any] = """dev""" if mode == """test""" else mode
_UpperCAmelCase : Tuple = self._feature_file(a_ )
logger.info("""Loading features from cached file %s""" ,a_ )
_UpperCAmelCase : Union[str, Any] = torch.load(a_ )
_UpperCAmelCase : List[str] = torch.tensor([f.input_ids for f in features] ,dtype=torch.long )
_UpperCAmelCase : Tuple = torch.tensor([f.attention_mask for f in features] ,dtype=torch.long )
_UpperCAmelCase : str = torch.tensor([f.token_type_ids for f in features] ,dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_UpperCAmelCase : Optional[int] = torch.tensor([f.label for f in features] ,dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_UpperCAmelCase : str = torch.tensor([f.label for f in features] ,dtype=torch.float )
return DataLoader(
TensorDataset(a_ ,a_ ,a_ ,a_ ) ,batch_size=a_ ,shuffle=a_ ,)
def _snake_case ( self ,a_ ,a_ ) -> Any:
_UpperCAmelCase : Any = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_UpperCAmelCase : int = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
_UpperCAmelCase : List[str] = self(**a_ )
_UpperCAmelCase ,_UpperCAmelCase : Optional[int] = outputs[:2]
_UpperCAmelCase : List[str] = logits.detach().cpu().numpy()
_UpperCAmelCase : Union[str, Any] = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _snake_case ( self ,a_ ) -> tuple:
_UpperCAmelCase : Optional[int] = torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item()
_UpperCAmelCase : Any = np.concatenate([x["""pred"""] for x in outputs] ,axis=0 )
if self.hparams.glue_output_mode == "classification":
_UpperCAmelCase : int = np.argmax(a_ ,axis=1 )
elif self.hparams.glue_output_mode == "regression":
_UpperCAmelCase : Union[str, Any] = np.squeeze(a_ )
_UpperCAmelCase : str = np.concatenate([x["""target"""] for x in outputs] ,axis=0 )
_UpperCAmelCase : Tuple = [[] for _ in range(out_label_ids.shape[0] )]
_UpperCAmelCase : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
_UpperCAmelCase : Optional[int] = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task ,a_ ,a_ )}
_UpperCAmelCase : Dict = dict(results.items() )
_UpperCAmelCase : Any = results
return ret, preds_list, out_label_list
def _snake_case ( self ,a_ ) -> dict:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Dict = self._eval_end(a_ )
_UpperCAmelCase : List[Any] = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _snake_case ( self ,a_ ) -> dict:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : str = self._eval_end(a_ )
_UpperCAmelCase : List[Any] = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _snake_case ( a_ ,a_ ) -> Any:
BaseTransformer.add_model_specific_args(a_ ,a_ )
parser.add_argument(
"""--max_seq_length""" ,default=128 ,type=a_ ,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) ,)
parser.add_argument(
"""--task""" ,default="""""" ,type=a_ ,required=a_ ,help="""The GLUE task to run""" ,)
parser.add_argument(
"""--gpus""" ,default=0 ,type=a_ ,help="""The number of GPUs allocated for this, it is by default 0 meaning none""" ,)
parser.add_argument(
"""--overwrite_cache""" ,action="""store_true""" ,help="""Overwrite the cached training and evaluation sets""" )
return parser
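# Command-line entry point: build the parser, create the output directory if
# needed, train, and optionally evaluate the last saved checkpoint on dev.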
def snake_case_ ( )-> Tuple:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
add_generic_args(lowerCAmelCase_ , os.getcwd() )
_UpperCAmelCase : Optional[int] = GLUETransformer.add_model_specific_args(lowerCAmelCase_ , os.getcwd() )
_UpperCAmelCase : Optional[int] = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_UpperCAmelCase : Optional[int] = os.path.join(
"""./results""" , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
_UpperCAmelCase : int = GLUETransformer(lowerCAmelCase_ )
_UpperCAmelCase : Any = generic_train(lowerCAmelCase_ , lowerCAmelCase_ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_UpperCAmelCase : int = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=lowerCAmelCase_ ) )
_UpperCAmelCase : int = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(lowerCAmelCase_ )
if __name__ == "__main__":
main()
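# Example invocation (hypothetical paths and flags; the generic arguments such
# as --model_name_or_path and --output_dir come from add_generic_args):
#     python run_glue.py --task mrpc --data_dir ./glue/MRPC \
#         --model_name_or_path bert-base-cased --output_dir ./results \
#         --do_train --do_predict --gpus 1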
| 349 | 0 |
'''simple docstring'''
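# Hard-coded denoising timestep schedules, each descending from 999 to 0 at a
# different granularity; presumably consumed by a diffusion sampler elsewhere.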
A_ : int = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
A_ : Union[str, Any] = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
A_ : Optional[int] = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
A_ : str = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
A_ : int = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
A_ : Optional[Any] = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
A_ : Optional[int] = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
A_ : Tuple = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 368 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : Union[str, Any] = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = """roformer"""
def __init__( self ,a_=50_000 ,a_=None ,a_=768 ,a_=12 ,a_=12 ,a_=3_072 ,a_="gelu" ,a_=0.1 ,a_=0.1 ,a_=1_536 ,a_=2 ,a_=0.02 ,a_=1E-1_2 ,a_=0 ,a_=False ,a_=True ,**a_ ,) -> Tuple:
super().__init__(pad_token_id=a_ ,**a_ )
_UpperCAmelCase : List[Any] = vocab_size
_UpperCAmelCase : str = hidden_size if embedding_size is None else embedding_size
_UpperCAmelCase : List[Any] = hidden_size
_UpperCAmelCase : str = num_hidden_layers
_UpperCAmelCase : Optional[Any] = num_attention_heads
_UpperCAmelCase : Optional[Any] = hidden_act
_UpperCAmelCase : str = intermediate_size
_UpperCAmelCase : Optional[Any] = hidden_dropout_prob
_UpperCAmelCase : Any = attention_probs_dropout_prob
_UpperCAmelCase : Optional[int] = max_position_embeddings
_UpperCAmelCase : Any = type_vocab_size
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : Dict = layer_norm_eps
_UpperCAmelCase : Optional[int] = rotary_value
_UpperCAmelCase : Any = use_cache
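# ONNX export configuration: declares dynamic batch/sequence axes (plus a
# choice axis for multiple-choice heads) for the standard BERT-style inputs.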
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCAmelCase : Optional[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_UpperCAmelCase : List[Any] = {0: """batch""", 1: """sequence"""}
_UpperCAmelCase : Tuple = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 349 | 0 |
'''simple docstring'''
from __future__ import annotations
A_ : Dict = [-1_0, -5, 0, 5, 5.1, 1_1, 1_3, 2_1, 3, 4, -2_1, -1_0, -5, -1, 0]
A_ : Any = [-5, 0, 5, 5.1, 1_1, 1_3, 2_1, -1, 4, -1, -1_0, -5, -1, 0, -1]
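# Three implementations of the next-greater-element problem (for each value,
# the first larger value to its right, or -1 if none): two O(n^2) scans and an
# O(n) stack-based pass over the reversed array.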
def snake_case_ ( lowerCAmelCase_ )-> list[float]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = []
_UpperCAmelCase : int = len(lowerCAmelCase_ )
for i in range(lowerCAmelCase_ ):
_UpperCAmelCase : float = -1
for j in range(i + 1 , lowerCAmelCase_ ):
if arr[i] < arr[j]:
_UpperCAmelCase : List[Any] = arr[j]
break
result.append(lowerCAmelCase_ )
return result
def snake_case_ ( lowerCAmelCase_ )-> list[float]:
'''simple docstring'''
_UpperCAmelCase : Any = []
for i, outer in enumerate(lowerCAmelCase_ ):
_UpperCAmelCase : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
_UpperCAmelCase : Any = inner
break
result.append(lowerCAmelCase_ )
return result
def snake_case_ ( lowerCAmelCase_ )-> list[float]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = len(lowerCAmelCase_ )
_UpperCAmelCase : list[float] = []
_UpperCAmelCase : list[float] = [-1] * arr_size
for index in reversed(range(lowerCAmelCase_ ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
_UpperCAmelCase : Any = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
A_ : Tuple = (
"""from __main__ import arr, next_greatest_element_slow, """
"""next_greatest_element_fast, next_greatest_element"""
)
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
)
| 369 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
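# Smoke test: wire two prajjwal1/bert-tiny checkpoints into an
# EncoderDecoderModel and fine-tune it on a 1% slice of CNN/DailyMail
# with Seq2SeqTrainer.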
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
@slow
@require_torch
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" ,"""prajjwal1/bert-tiny""" )
_UpperCAmelCase : List[Any] = BertTokenizer.from_pretrained("""bert-base-uncased""" )
_UpperCAmelCase : List[Any] = bertabert.config.encoder.vocab_size
_UpperCAmelCase : Optional[int] = tokenizer.sep_token_id
_UpperCAmelCase : Union[str, Any] = tokenizer.cls_token_id
_UpperCAmelCase : str = 128
_UpperCAmelCase : List[str] = datasets.load_dataset("""cnn_dailymail""" ,"""3.0.0""" ,split="""train[:1%]""" )
_UpperCAmelCase : Union[str, Any] = datasets.load_dataset("""cnn_dailymail""" ,"""3.0.0""" ,split="""validation[:1%]""" )
_UpperCAmelCase : Any = train_dataset.select(range(32 ) )
_UpperCAmelCase : Any = val_dataset.select(range(16 ) )
_UpperCAmelCase : List[Any] = 4
def _map_to_encoder_decoder_inputs(a_ ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_UpperCAmelCase : int = tokenizer(batch["""article"""] ,padding="""max_length""" ,truncation=a_ ,max_length=512 )
_UpperCAmelCase : Tuple = tokenizer(batch["""highlights"""] ,padding="""max_length""" ,truncation=a_ ,max_length=128 )
_UpperCAmelCase : int = inputs.input_ids
_UpperCAmelCase : Union[str, Any] = inputs.attention_mask
_UpperCAmelCase : Union[str, Any] = outputs.input_ids
_UpperCAmelCase : Dict = outputs.input_ids.copy()
_UpperCAmelCase : Dict = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
_UpperCAmelCase : Optional[int] = outputs.attention_mask
assert all(len(a_ ) == 512 for x in inputs.input_ids )
assert all(len(a_ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(a_ ):
_UpperCAmelCase : Optional[int] = pred.label_ids
_UpperCAmelCase : Optional[int] = pred.predictions
# all unnecessary tokens are removed
_UpperCAmelCase : Union[str, Any] = tokenizer.batch_decode(a_ ,skip_special_tokens=a_ )
_UpperCAmelCase : str = tokenizer.batch_decode(a_ ,skip_special_tokens=a_ )
_UpperCAmelCase : Tuple = sum([int(pred_str[i] == label_str[i] ) for i in range(len(a_ ) )] ) / len(a_ )
return {"accuracy": accuracy}
# map train dataset
_UpperCAmelCase : Union[str, Any] = train_dataset.map(
_map_to_encoder_decoder_inputs ,batched=a_ ,batch_size=a_ ,remove_columns=["""article""", """highlights"""] ,)
train_dataset.set_format(
type="""torch""" ,columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] ,)
# same for validation dataset
_UpperCAmelCase : List[str] = val_dataset.map(
_map_to_encoder_decoder_inputs ,batched=a_ ,batch_size=a_ ,remove_columns=["""article""", """highlights"""] ,)
val_dataset.set_format(
type="""torch""" ,columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] ,)
_UpperCAmelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_UpperCAmelCase : List[str] = SeqaSeqTrainingArguments(
output_dir=a_ ,per_device_train_batch_size=a_ ,per_device_eval_batch_size=a_ ,predict_with_generate=a_ ,evaluation_strategy="""steps""" ,do_train=a_ ,do_eval=a_ ,warmup_steps=0 ,eval_steps=2 ,logging_steps=2 ,)
# instantiate trainer
_UpperCAmelCase : int = SeqaSeqTrainer(
model=a_ ,args=a_ ,compute_metrics=_compute_metrics ,train_dataset=a_ ,eval_dataset=a_ ,tokenizer=a_ ,)
# start training
trainer.train()
| 349 | 0 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
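# Tokenizer tests for CodeGen: build a toy BPE vocabulary on disk, then check
# slow/fast parity, padding behaviour, custom BOS handling, and decoding with
# truncate_before_pattern.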
@require_tokenizers
class lowercase ( __snake_case , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = CodeGenTokenizer
UpperCAmelCase = CodeGenTokenizerFast
UpperCAmelCase = True
UpperCAmelCase = {"""add_prefix_space""": True}
UpperCAmelCase = False
def _snake_case ( self ) -> List[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCAmelCase : int = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_UpperCAmelCase : List[Any] = dict(zip(UpperCamelCase__ ,range(len(UpperCamelCase__ ) ) ) )
_UpperCAmelCase : List[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_UpperCAmelCase : Optional[int] = {"unk_token": "<unk>"}
_UpperCAmelCase : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
_UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCamelCase__ ) )
def _snake_case ( self ,**a_ ) -> Any:
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname ,**UpperCamelCase__ )
def _snake_case ( self ,**a_ ) -> List[str]:
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname ,**UpperCamelCase__ )
def _snake_case ( self ,a_ ) -> Optional[int]:
_UpperCAmelCase : Any = "lower newer"
_UpperCAmelCase : Dict = "lower newer"
return input_text, output_text
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : Optional[Any] = CodeGenTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
_UpperCAmelCase : Union[str, Any] = "lower newer"
_UpperCAmelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_UpperCAmelCase : Tuple = tokenizer.tokenize(UpperCamelCase__ ,add_prefix_space=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ ,UpperCamelCase__ )
_UpperCAmelCase : Tuple = tokens + [tokenizer.unk_token]
_UpperCAmelCase : List[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) ,UpperCamelCase__ )
def _snake_case ( self ) -> Any:
if not self.test_rust_tokenizer:
return
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : str = self.get_rust_tokenizer(add_prefix_space=UpperCamelCase__ )
_UpperCAmelCase : List[Any] = "lower newer"
# Testing tokenization
_UpperCAmelCase : Optional[Any] = tokenizer.tokenize(UpperCamelCase__ ,add_prefix_space=UpperCamelCase__ )
_UpperCAmelCase : Dict = rust_tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ ,UpperCamelCase__ )
# Testing conversion to ids without special tokens
_UpperCAmelCase : Optional[Any] = tokenizer.encode(UpperCamelCase__ ,add_special_tokens=UpperCamelCase__ ,add_prefix_space=UpperCamelCase__ )
_UpperCAmelCase : str = rust_tokenizer.encode(UpperCamelCase__ ,add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ ,UpperCamelCase__ )
# Testing conversion to ids with special tokens
_UpperCAmelCase : Dict = self.get_rust_tokenizer(add_prefix_space=UpperCamelCase__ )
_UpperCAmelCase : List[Any] = tokenizer.encode(UpperCamelCase__ ,add_prefix_space=UpperCamelCase__ )
_UpperCAmelCase : Union[str, Any] = rust_tokenizer.encode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ ,UpperCamelCase__ )
# Testing the unknown token
_UpperCAmelCase : Any = tokens + [rust_tokenizer.unk_token]
_UpperCAmelCase : List[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) ,UpperCamelCase__ )
def _snake_case ( self ,*a_ ,**a_ ) -> str:
pass
def _snake_case ( self ,a_=15 ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ ,**UpperCamelCase__ )
# Simple input
_UpperCAmelCase : Optional[int] = "This is a simple input"
_UpperCAmelCase : Dict = ["This is a simple input 1", "This is a simple input 2"]
_UpperCAmelCase : Tuple = ("This is a simple input", "This is a pair")
_UpperCAmelCase : List[Any] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(UpperCamelCase__ ,tokenizer_r.encode ,UpperCamelCase__ ,max_length=UpperCamelCase__ ,padding="""max_length""" )
# Simple input
self.assertRaises(UpperCamelCase__ ,tokenizer_r.encode_plus ,UpperCamelCase__ ,max_length=UpperCamelCase__ ,padding="""max_length""" )
# Simple input
self.assertRaises(
UpperCamelCase__ ,tokenizer_r.batch_encode_plus ,UpperCamelCase__ ,max_length=UpperCamelCase__ ,padding="""max_length""" ,)
# Pair input
self.assertRaises(UpperCamelCase__ ,tokenizer_r.encode ,UpperCamelCase__ ,max_length=UpperCamelCase__ ,padding="""max_length""" )
# Pair input
self.assertRaises(UpperCamelCase__ ,tokenizer_r.encode_plus ,UpperCamelCase__ ,max_length=UpperCamelCase__ ,padding="""max_length""" )
# Pair input
self.assertRaises(
UpperCamelCase__ ,tokenizer_r.batch_encode_plus ,UpperCamelCase__ ,max_length=UpperCamelCase__ ,padding="""max_length""" ,)
def _snake_case ( self ) -> str:
_UpperCAmelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname ,pad_token="""<pad>""" )
# Simple input
_UpperCAmelCase : Dict = "This is a simple input"
_UpperCAmelCase : List[str] = ["This is a simple input looooooooong", "This is a simple input"]
_UpperCAmelCase : str = ("This is a simple input", "This is a pair")
_UpperCAmelCase : Tuple = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_UpperCAmelCase : Union[str, Any] = tokenizer.pad_token_id
_UpperCAmelCase : List[Any] = tokenizer(UpperCamelCase__ ,padding="""max_length""" ,max_length=30 ,return_tensors="""np""" )
_UpperCAmelCase : List[Any] = tokenizer(UpperCamelCase__ ,padding=UpperCamelCase__ ,truncate=UpperCamelCase__ ,return_tensors="""np""" )
_UpperCAmelCase : Dict = tokenizer(*UpperCamelCase__ ,padding="""max_length""" ,max_length=60 ,return_tensors="""np""" )
_UpperCAmelCase : Dict = tokenizer(UpperCamelCase__ ,padding=UpperCamelCase__ ,truncate=UpperCamelCase__ ,return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] ,30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] ,33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] ,60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] ,52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def _snake_case ( self ) -> str:
_UpperCAmelCase : Any = "$$$"
_UpperCAmelCase : Dict = CodeGenTokenizer.from_pretrained(self.tmpdirname ,bos_token=UpperCamelCase__ ,add_bos_token=UpperCamelCase__ )
_UpperCAmelCase : int = "This is a simple input"
_UpperCAmelCase : int = ["This is a simple input 1", "This is a simple input 2"]
_UpperCAmelCase : int = tokenizer.bos_token_id
_UpperCAmelCase : Optional[int] = tokenizer(UpperCamelCase__ )
_UpperCAmelCase : List[str] = tokenizer(UpperCamelCase__ )
self.assertEqual(out_s.input_ids[0] ,UpperCamelCase__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_UpperCAmelCase : Tuple = tokenizer.decode(out_s.input_ids )
_UpperCAmelCase : Optional[int] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] ,UpperCamelCase__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : Optional[int] = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
_UpperCAmelCase : List[str] = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
_UpperCAmelCase : int = "\nif len_a > len_b: result = a\nelse: result = b"
_UpperCAmelCase : List[Any] = tokenizer.encode(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = ["^#", re.escape("""<|endoftext|>""" ), "^'''", "^\"\"\"", "\n\n\n"]
_UpperCAmelCase : Tuple = tokenizer.decode(UpperCamelCase__ ,truncate_before_pattern=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ ,UpperCamelCase__ )
def _snake_case ( self ) -> str:
pass
| 370 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
A_ : List[Any] = 637_8137.0
A_ : Dict = 635_6752.31_4245
A_ : int = 6_3_7_8_1_3_7
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> float:
'''simple docstring'''
_UpperCAmelCase : Tuple = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
_UpperCAmelCase : Any = atan((1 - flattening) * tan(radians(lowerCAmelCase_ ) ) )
_UpperCAmelCase : Optional[Any] = atan((1 - flattening) * tan(radians(lowerCAmelCase_ ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
_UpperCAmelCase : Union[str, Any] = haversine_distance(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
    _UpperCAmelCase : Optional[int] = (b_lata + b_latb) / 2
    _UpperCAmelCase : Any = (b_latb - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
_UpperCAmelCase : List[str] = (sin(lowerCAmelCase_ ) ** 2) * (cos(lowerCAmelCase_ ) ** 2)
_UpperCAmelCase : Union[str, Any] = cos(sigma / 2 ) ** 2
    _UpperCAmelCase : Dict = (sigma - sin(lowerCAmelCase_ )) * (x_numerator / x_denominator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
_UpperCAmelCase : Union[str, Any] = (cos(lowerCAmelCase_ ) ** 2) * (sin(lowerCAmelCase_ ) ** 2)
_UpperCAmelCase : Union[str, Any] = sin(sigma / 2 ) ** 2
_UpperCAmelCase : Optional[Any] = (sigma + sin(lowerCAmelCase_ )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
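# Unit tests for DisjunctiveConstraint: input validation, rejection of nested
# (subset) constraints, and step/completed/reset bookkeeping while matching
# one of several token-id branches.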
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Optional[Any]:
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
_UpperCAmelCase : Any = [[1, 2, 4], [1, 2, 3, 4]]
_UpperCAmelCase : Union[str, Any] = DisjunctiveConstraint(__snake_case )
self.assertTrue(isinstance(dc.token_ids ,__snake_case ) )
with self.assertRaises(__snake_case ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__snake_case ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def _snake_case ( self ) -> int:
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
_UpperCAmelCase : Any = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__snake_case ):
DisjunctiveConstraint(__snake_case ) # fails here
def _snake_case ( self ) -> int:
_UpperCAmelCase : Optional[int] = [[1, 2, 3], [1, 2, 4]]
_UpperCAmelCase : Optional[int] = DisjunctiveConstraint(__snake_case )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Any = dc.update(1 )
_UpperCAmelCase : Optional[Any] = stepped is True and completed is False and reset is False
self.assertTrue(__snake_case )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : List[Any] = dc.update(2 )
_UpperCAmelCase : List[str] = stepped is True and completed is False and reset is False
self.assertTrue(__snake_case )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Any = dc.update(3 )
_UpperCAmelCase : List[str] = stepped is True and completed is True and reset is False
self.assertTrue(__snake_case )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def _snake_case ( self ) -> str:
_UpperCAmelCase : List[Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
_UpperCAmelCase : List[str] = DisjunctiveConstraint(__snake_case )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Any = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Tuple = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Any = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : str = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : List[str] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Union[str, Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Optional[Any] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 371 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
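# Approximate the definite integral of fnc over [x_start, x_end] with the
# composite trapezoidal rule, using the given number of equal-width steps.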
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 100 , )-> float:
'''simple docstring'''
_UpperCAmelCase : str = x_start
_UpperCAmelCase : Union[str, Any] = fnc(lowerCAmelCase_ )
_UpperCAmelCase : Tuple = 0.0
for _ in range(lowerCAmelCase_ ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
        _UpperCAmelCase : Any = (x_end - x_start) / steps + xa
        _UpperCAmelCase : List[Any] = fnc(x_next )
        area += abs(fx_next + fxa ) * (x_next - xa) / 2
        # Increment step
        _UpperCAmelCase : Any = x_next
        _UpperCAmelCase : str = fx_next
return area
if __name__ == "__main__":
def snake_case_ ( lowerCAmelCase_ )-> Any:
'''simple docstring'''
return x**3 + x**2
print("""f(x) = x^3 + x^2""")
print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
A_ : List[str] = 1_0
while i <= 1_0_0_0_0_0:
print(f"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 1_0
| 349 | 0 |
'''simple docstring'''
import logging
import os
from .state import PartialState
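# Logging adapter aware of the Accelerate process state: by default only the
# main process emits records, or every process can log in rank order when
# in_order is set.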
class lowercase ( logging.LoggerAdapter ):
"""simple docstring"""
@staticmethod
def _snake_case ( a_ ) -> Any:
_UpperCAmelCase : Optional[int] = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def _snake_case ( self ,a_ ,a_ ,*a_ ,**a_ ) -> Any:
if PartialState._shared_state == {}:
raise RuntimeError(
"""You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.""" )
_UpperCAmelCase : List[Any] = kwargs.pop("""main_process_only""" ,a_ )
_UpperCAmelCase : Tuple = kwargs.pop("""in_order""" ,a_ )
if self.isEnabledFor(a_ ):
if self._should_log(a_ ):
_UpperCAmelCase : List[Any] = self.process(a_ ,a_ )
self.logger.log(a_ ,a_ ,*a_ ,**a_ )
elif in_order:
_UpperCAmelCase : Tuple = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_UpperCAmelCase : List[str] = self.process(a_ ,a_ )
self.logger.log(a_ ,a_ ,*a_ ,**a_ )
state.wait_for_everyone()
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ = None )-> Tuple:
'''simple docstring'''
if log_level is None:
_UpperCAmelCase : Dict = os.environ.get("""ACCELERATE_LOG_LEVEL""" , lowerCAmelCase_ )
_UpperCAmelCase : Dict = logging.getLogger(lowerCAmelCase_ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(lowerCAmelCase_ , {} )
| 350 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def snake_case_ ( )-> int:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=lowerCAmelCase_ , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=lowerCAmelCase_ , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=lowerCAmelCase_ )
return parser.parse_args()
def snake_case_ ( )-> str:
'''simple docstring'''
_UpperCAmelCase : List[str] = parse_args()
# Import training_script as a module.
_UpperCAmelCase : List[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_UpperCAmelCase : Optional[Any] = script_fpath.stem
_UpperCAmelCase : List[str] = importlib.import_module(lowerCAmelCase_ )
# Patch sys.argv
_UpperCAmelCase : Dict = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
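# Typical usage (hypothetical script path and trailing arguments):
#     python xla_spawn.py --num_cores 8 path/to/train_script.py --some_arg ...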
| 349 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
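# Self-Attention Guidance (SAG) pipeline tests: a tiny randomly initialized
# Stable Diffusion stack for fast checks, plus slow GPU tests that compare
# output slices against reference values.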
class lowercase ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = StableDiffusionSAGPipeline
UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase = False
def _snake_case ( self ) -> Tuple:
torch.manual_seed(0 )
_UpperCAmelCase : str = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
_UpperCAmelCase : Optional[Any] = DDIMScheduler(
beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=a_ ,set_alpha_to_one=a_ ,)
torch.manual_seed(0 )
_UpperCAmelCase : Dict = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,)
_UpperCAmelCase : str = CLIPTextModel(a_ )
_UpperCAmelCase : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_UpperCAmelCase : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _snake_case ( self ,a_ ,a_=0 ) -> Any:
if str(a_ ).startswith("""mps""" ):
_UpperCAmelCase : Union[str, Any] = torch.manual_seed(a_ )
else:
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=a_ ).manual_seed(a_ )
_UpperCAmelCase : Optional[int] = {
"""prompt""": """.""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 1.0,
"""sag_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def _snake_case ( self ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : str = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
_UpperCAmelCase : Dict = sag_pipe.to(a_ )
sag_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Union[str, Any] = """."""
_UpperCAmelCase : Tuple = torch.manual_seed(0 )
_UpperCAmelCase : Dict = sag_pipe(
[prompt] ,generator=a_ ,guidance_scale=7.5 ,sag_scale=1.0 ,num_inference_steps=20 ,output_type="""np""" )
_UpperCAmelCase : List[Any] = output.images
_UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase : Optional[int] = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def _snake_case ( self ) -> Tuple:
_UpperCAmelCase : Dict = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
_UpperCAmelCase : Tuple = sag_pipe.to(a_ )
sag_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Union[str, Any] = """."""
_UpperCAmelCase : int = torch.manual_seed(0 )
_UpperCAmelCase : str = sag_pipe(
[prompt] ,generator=a_ ,guidance_scale=7.5 ,sag_scale=1.0 ,num_inference_steps=20 ,output_type="""np""" )
_UpperCAmelCase : Dict = output.images
_UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase : Any = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def _snake_case ( self ) -> Any:
_UpperCAmelCase : str = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
_UpperCAmelCase : List[Any] = sag_pipe.to(a_ )
sag_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : str = """."""
_UpperCAmelCase : Dict = torch.manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = sag_pipe(
[prompt] ,width=768 ,height=512 ,generator=a_ ,guidance_scale=7.5 ,sag_scale=1.0 ,num_inference_steps=20 ,output_type="""np""" ,)
_UpperCAmelCase : Union[str, Any] = output.images
assert image.shape == (1, 512, 768, 3)
| 351 |
'''simple docstring'''
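# Return the largest integer that can be formed by deleting exactly one digit
# from the absolute value of the input.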
def snake_case_ ( lowerCAmelCase_ )-> int:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
raise TypeError("""only integers accepted as input""" )
else:
_UpperCAmelCase : Dict = str(abs(lowerCAmelCase_ ) )
_UpperCAmelCase : Optional[Any] = [list(lowerCAmelCase_ ) for char in range(len(lowerCAmelCase_ ) )]
for index in range(len(lowerCAmelCase_ ) ):
num_transpositions[index].pop(lowerCAmelCase_ )
return max(
int("""""".join(list(lowerCAmelCase_ ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 349 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
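# VAE building blocks: an Encoder that downsamples images to latents
# (optionally doubling channels for mean/log-variance), a mirrored upsampling
# Decoder, and a VQ-VAE style vector quantizer.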
@dataclass
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = 42
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self ,a_=3 ,a_=3 ,a_=("DownEncoderBlock2D",) ,a_=(64,) ,a_=2 ,a_=32 ,a_="silu" ,a_=True ,) -> Dict:
super().__init__()
_UpperCAmelCase : List[Any] = layers_per_block
_UpperCAmelCase : Tuple = torch.nn.Convad(
a_ ,block_out_channels[0] ,kernel_size=3 ,stride=1 ,padding=1 ,)
_UpperCAmelCase : str = None
_UpperCAmelCase : int = nn.ModuleList([] )
# down
_UpperCAmelCase : Tuple = block_out_channels[0]
for i, down_block_type in enumerate(a_ ):
_UpperCAmelCase : Union[str, Any] = output_channel
_UpperCAmelCase : Any = block_out_channels[i]
_UpperCAmelCase : List[Any] = i == len(a_ ) - 1
_UpperCAmelCase : Tuple = get_down_block(
a_ ,num_layers=self.layers_per_block ,in_channels=a_ ,out_channels=a_ ,add_downsample=not is_final_block ,resnet_eps=1E-6 ,downsample_padding=0 ,resnet_act_fn=a_ ,resnet_groups=a_ ,attention_head_dim=a_ ,temb_channels=a_ ,)
self.down_blocks.append(a_ )
# mid
_UpperCAmelCase : Tuple = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1E-6 ,resnet_act_fn=a_ ,output_scale_factor=1 ,resnet_time_scale_shift="""default""" ,attention_head_dim=block_out_channels[-1] ,resnet_groups=a_ ,temb_channels=a_ ,)
# out
_UpperCAmelCase : List[str] = nn.GroupNorm(num_channels=block_out_channels[-1] ,num_groups=a_ ,eps=1E-6 )
_UpperCAmelCase : List[Any] = nn.SiLU()
_UpperCAmelCase : int = 2 * out_channels if double_z else out_channels
_UpperCAmelCase : Optional[Any] = nn.Convad(block_out_channels[-1] ,a_ ,3 ,padding=1 )
_UpperCAmelCase : Optional[Any] = False
def _snake_case ( self ,a_ ) -> Optional[Any]:
_UpperCAmelCase : Any = x
_UpperCAmelCase : int = self.conv_in(a_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(a_ ):
def custom_forward(*a_ ):
return module(*a_ )
return custom_forward
# down
if is_torch_version(""">=""" ,"""1.11.0""" ):
for down_block in self.down_blocks:
_UpperCAmelCase : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(a_ ) ,a_ ,use_reentrant=a_ )
# middle
_UpperCAmelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,a_ ,use_reentrant=a_ )
else:
for down_block in self.down_blocks:
_UpperCAmelCase : List[Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(a_ ) ,a_ )
# middle
_UpperCAmelCase : Tuple = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) ,a_ )
else:
# down
for down_block in self.down_blocks:
_UpperCAmelCase : Tuple = down_block(a_ )
# middle
_UpperCAmelCase : Dict = self.mid_block(a_ )
# post-process
_UpperCAmelCase : str = self.conv_norm_out(a_ )
_UpperCAmelCase : Dict = self.conv_act(a_ )
_UpperCAmelCase : Optional[int] = self.conv_out(a_ )
return sample
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self ,a_=3 ,a_=3 ,a_=("UpDecoderBlock2D",) ,a_=(64,) ,a_=2 ,a_=32 ,a_="silu" ,a_="group" ,) -> List[Any]:
super().__init__()
_UpperCAmelCase : List[str] = layers_per_block
_UpperCAmelCase : int = nn.Convad(
a_ ,block_out_channels[-1] ,kernel_size=3 ,stride=1 ,padding=1 ,)
_UpperCAmelCase : Dict = None
_UpperCAmelCase : List[str] = nn.ModuleList([] )
_UpperCAmelCase : Optional[Any] = in_channels if norm_type == """spatial""" else None
# mid
_UpperCAmelCase : Union[str, Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1E-6 ,resnet_act_fn=a_ ,output_scale_factor=1 ,resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type ,attention_head_dim=block_out_channels[-1] ,resnet_groups=a_ ,temb_channels=a_ ,)
# up
_UpperCAmelCase : str = list(reversed(a_ ) )
_UpperCAmelCase : str = reversed_block_out_channels[0]
for i, up_block_type in enumerate(a_ ):
_UpperCAmelCase : str = output_channel
_UpperCAmelCase : Optional[int] = reversed_block_out_channels[i]
_UpperCAmelCase : Dict = i == len(a_ ) - 1
_UpperCAmelCase : List[str] = get_up_block(
a_ ,num_layers=self.layers_per_block + 1 ,in_channels=a_ ,out_channels=a_ ,prev_output_channel=a_ ,add_upsample=not is_final_block ,resnet_eps=1E-6 ,resnet_act_fn=a_ ,resnet_groups=a_ ,attention_head_dim=a_ ,temb_channels=a_ ,resnet_time_scale_shift=a_ ,)
self.up_blocks.append(a_ )
_UpperCAmelCase : Union[str, Any] = output_channel
# out
if norm_type == "spatial":
_UpperCAmelCase : Optional[Any] = SpatialNorm(block_out_channels[0] ,a_ )
else:
_UpperCAmelCase : List[str] = nn.GroupNorm(num_channels=block_out_channels[0] ,num_groups=a_ ,eps=1E-6 )
_UpperCAmelCase : Optional[int] = nn.SiLU()
_UpperCAmelCase : Any = nn.Convad(block_out_channels[0] ,a_ ,3 ,padding=1 )
_UpperCAmelCase : List[Any] = False
def _snake_case ( self ,a_ ,a_=None ) -> Any:
_UpperCAmelCase : Optional[Any] = z
_UpperCAmelCase : List[str] = self.conv_in(a_ )
_UpperCAmelCase : Optional[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(a_ ):
def custom_forward(*a_ ):
return module(*a_ )
return custom_forward
if is_torch_version(""">=""" ,"""1.11.0""" ):
# middle
_UpperCAmelCase : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,a_ ,a_ ,use_reentrant=a_ )
_UpperCAmelCase : Optional[Any] = sample.to(a_ )
# up
for up_block in self.up_blocks:
_UpperCAmelCase : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(a_ ) ,a_ ,a_ ,use_reentrant=a_ )
else:
# middle
_UpperCAmelCase : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,a_ ,a_ )
_UpperCAmelCase : Dict = sample.to(a_ )
# up
for up_block in self.up_blocks:
_UpperCAmelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(a_ ) ,a_ ,a_ )
else:
# middle
_UpperCAmelCase : List[Any] = self.mid_block(a_ ,a_ )
_UpperCAmelCase : str = sample.to(a_ )
# up
for up_block in self.up_blocks:
_UpperCAmelCase : Union[str, Any] = up_block(a_ ,a_ )
# post-process
if latent_embeds is None:
_UpperCAmelCase : Optional[Any] = self.conv_norm_out(a_ )
else:
_UpperCAmelCase : Optional[Any] = self.conv_norm_out(a_ ,a_ )
_UpperCAmelCase : Optional[Any] = self.conv_act(a_ )
_UpperCAmelCase : List[str] = self.conv_out(a_ )
return sample
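# Nearest-neighbour vector quantizer: snaps each latent onto its closest
# codebook embedding, adds a commitment loss, and passes gradients through via
# the straight-through estimator; optionally remaps indices to a reduced set
# of used codes.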
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self ,a_ ,a_ ,a_ ,a_=None ,a_="random" ,a_=False ,a_=True ) -> List[str]:
super().__init__()
_UpperCAmelCase : Dict = n_e
_UpperCAmelCase : Any = vq_embed_dim
_UpperCAmelCase : Union[str, Any] = beta
_UpperCAmelCase : Tuple = legacy
_UpperCAmelCase : List[Any] = nn.Embedding(self.n_e ,self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e ,1.0 / self.n_e )
_UpperCAmelCase : str = remap
if self.remap is not None:
self.register_buffer("""used""" ,torch.tensor(np.load(self.remap ) ) )
_UpperCAmelCase : Optional[Any] = self.used.shape[0]
_UpperCAmelCase : str = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_UpperCAmelCase : Tuple = self.re_embed
_UpperCAmelCase : int = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
_UpperCAmelCase : Any = n_e
_UpperCAmelCase : Dict = sane_index_shape
def _snake_case ( self ,a_ ) -> Tuple:
_UpperCAmelCase : Optional[Any] = inds.shape
assert len(a_ ) > 1
_UpperCAmelCase : str = inds.reshape(ishape[0] ,-1 )
_UpperCAmelCase : List[Any] = self.used.to(a_ )
_UpperCAmelCase : Any = (inds[:, :, None] == used[None, None, ...]).long()
_UpperCAmelCase : List[Any] = match.argmax(-1 )
_UpperCAmelCase : str = match.sum(2 ) < 1
if self.unknown_index == "random":
_UpperCAmelCase : int = torch.randint(0 ,self.re_embed ,size=new[unknown].shape ).to(device=new.device )
else:
_UpperCAmelCase : str = self.unknown_index
return new.reshape(a_ )
def _snake_case ( self ,a_ ) -> Optional[int]:
_UpperCAmelCase : Optional[Any] = inds.shape
assert len(a_ ) > 1
_UpperCAmelCase : int = inds.reshape(ishape[0] ,-1 )
_UpperCAmelCase : Union[str, Any] = self.used.to(a_ )
if self.re_embed > self.used.shape[0]: # extra token
_UpperCAmelCase : Tuple = 0 # simply set to zero
_UpperCAmelCase : Any = torch.gather(used[None, :][inds.shape[0] * [0], :] ,1 ,a_ )
return back.reshape(a_ )
def _snake_case ( self ,a_ ) -> Union[str, Any]:
# reshape z -> (batch, height, width, channel) and flatten
_UpperCAmelCase : Optional[int] = z.permute(0 ,2 ,3 ,1 ).contiguous()
_UpperCAmelCase : Optional[Any] = z.view(-1 ,self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_UpperCAmelCase : Dict = torch.argmin(torch.cdist(a_ ,self.embedding.weight ) ,dim=1 )
_UpperCAmelCase : List[Any] = self.embedding(a_ ).view(z.shape )
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : Dict = None
# compute loss for embedding
if not self.legacy:
_UpperCAmelCase : List[str] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_UpperCAmelCase : str = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_UpperCAmelCase : Any = z + (z_q - z).detach()
# reshape back to match original input shape
_UpperCAmelCase : Optional[Any] = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
if self.remap is not None:
_UpperCAmelCase : Union[str, Any] = min_encoding_indices.reshape(z.shape[0] ,-1 ) # add batch axis
_UpperCAmelCase : str = self.remap_to_used(a_ )
_UpperCAmelCase : Dict = min_encoding_indices.reshape(-1 ,1 ) # flatten
if self.sane_index_shape:
_UpperCAmelCase : Union[str, Any] = min_encoding_indices.reshape(z_q.shape[0] ,z_q.shape[2] ,z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _snake_case ( self ,a_ ,a_ ) -> str:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
_UpperCAmelCase : List[Any] = indices.reshape(shape[0] ,-1 ) # add batch axis
_UpperCAmelCase : Dict = self.unmap_to_all(a_ )
_UpperCAmelCase : Dict = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_UpperCAmelCase : Optional[Any] = self.embedding(a_ )
if shape is not None:
_UpperCAmelCase : int = z_q.view(a_ )
# reshape back to match original input shape
_UpperCAmelCase : str = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
return z_q
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self ,a_ ,a_=False ) -> List[str]:
_UpperCAmelCase : Any = parameters
        _UpperCAmelCase ,_UpperCAmelCase : Optional[Any] = torch.chunk(a_ ,2 ,dim=1 )
_UpperCAmelCase : List[Any] = torch.clamp(self.logvar ,-30.0 ,20.0 )
_UpperCAmelCase : List[str] = deterministic
_UpperCAmelCase : Tuple = torch.exp(0.5 * self.logvar )
_UpperCAmelCase : List[str] = torch.exp(self.logvar )
if self.deterministic:
_UpperCAmelCase : List[str] = torch.zeros_like(
self.mean ,device=self.parameters.device ,dtype=self.parameters.dtype )
def _snake_case ( self ,a_ = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
_UpperCAmelCase : str = randn_tensor(
self.mean.shape ,generator=a_ ,device=self.parameters.device ,dtype=self.parameters.dtype )
_UpperCAmelCase : Optional[Any] = self.mean + self.std * sample
return x
def _snake_case ( self ,a_=None ) -> List[Any]:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean ,2 ) + self.var - 1.0 - self.logvar ,dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean ,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar ,dim=[1, 2, 3] ,)
def _snake_case ( self ,a_ ,a_=[1, 2, 3] ) -> Tuple:
if self.deterministic:
return torch.Tensor([0.0] )
_UpperCAmelCase : Optional[Any] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean ,2 ) / self.var ,dim=a_ )
def _snake_case ( self ) -> Optional[int]:
return self.mean
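# The class above is an obfuscated DiagonalGaussianDistribution: `parameters`
# packs mean and log-variance along the channel dim, sample() applies the
# reparameterization trick, and kl()/nll() are the closed-form diagonal-Gaussian
# expressions. A minimal de-obfuscated sketch of the same idea (names here are
# illustrative assumptions, not the exact diffusers API):
import torch
class DiagonalGaussian:
    def __init__(self, parameters: torch.Tensor):
        # split the channel dimension into mean and log-variance halves
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)  # numerical safety
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
    def sample(self, generator: torch.Generator = None) -> torch.Tensor:
        eps = torch.randn(self.mean.shape, generator=generator,
                          device=self.mean.device, dtype=self.mean.dtype)
        return self.mean + self.std * eps  # x = mean + std * eps
    def kl(self) -> torch.Tensor:
        # KL(N(mean, var) || N(0, I)), summed over all non-batch dimensions
        return 0.5 * torch.sum(self.mean.pow(2) + self.var - 1.0 - self.logvar,
                               dim=[1, 2, 3])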
| 352 |
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
A_ : Dict = logging.get_logger(__name__)
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> None:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ), F'''{len(lowerCAmelCase_ )} != {len(lowerCAmelCase_ )}'''
dest_layers.load_state_dict(layers_to_copy.state_dict() )
A_ : Union[str, Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
1_2: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 1_1],
4: [0, 4, 8, 1_1],
6: [0, 2, 4, 7, 9, 1_1],
9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1],
1_2: list(range(1_2)),
},
1_6: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 1_5],
3: [0, 8, 1_5],
4: [0, 5, 1_0, 1_5],
6: [0, 3, 6, 9, 1_2, 1_5],
8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5],
9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5],
1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5],
1_6: list(range(1_6)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
A_ : int = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]},
1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]},
}
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> Union[str, Any]:
'''simple docstring'''
try:
_UpperCAmelCase : Any = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F'''no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'''
F''' {n_student}''' )
return list(range(lowerCAmelCase_ ) )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> List[int]:
'''simple docstring'''
if n_student > n_teacher:
raise ValueError(F'''Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}''' )
elif n_teacher == n_student:
return list(range(lowerCAmelCase_ ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ = "student" , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_=False , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , )-> Tuple[PreTrainedModel, List[int], List[int]]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = """encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."""
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
AutoTokenizer.from_pretrained(lowerCAmelCase_ ).save_pretrained(lowerCAmelCase_ ) # purely for convenience
_UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase_ ).eval()
else:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), F'''teacher must be a model or string got type {type(lowerCAmelCase_ )}'''
_UpperCAmelCase : str = teacher.config.to_diff_dict()
try:
_UpperCAmelCase ,_UpperCAmelCase : Optional[int] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
_UpperCAmelCase : Tuple = teacher_e
if d is None:
_UpperCAmelCase : Dict = teacher_d
init_kwargs.update({"""encoder_layers""": e, """decoder_layers""": d} )
except AttributeError: # T5
if hasattr(teacher.config , """num_encoder_layers""" ):
_UpperCAmelCase ,_UpperCAmelCase : int = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
_UpperCAmelCase ,_UpperCAmelCase : int = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
_UpperCAmelCase : List[str] = teacher_e
if d is None:
_UpperCAmelCase : str = teacher_d
if hasattr(teacher.config , """num_encoder_layers""" ):
init_kwargs.update({"""num_encoder_layers""": e, """num_decoder_layers""": d} )
else:
init_kwargs.update({"""num_layers""": e, """num_decoder_layers""": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCAmelCase_ )
# Copy weights
_UpperCAmelCase : Any = teacher.config_class(**lowerCAmelCase_ )
_UpperCAmelCase : Optional[Any] = AutoModelForSeqaSeqLM.from_config(lowerCAmelCase_ )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
_UpperCAmelCase : Optional[Any] = student.load_state_dict(teacher.state_dict() , strict=lowerCAmelCase_ )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
_UpperCAmelCase ,_UpperCAmelCase : Optional[Any] = list(range(lowerCAmelCase_ ) ), list(range(lowerCAmelCase_ ) )
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'''
F''' {save_path}''' )
student.save_pretrained(lowerCAmelCase_ )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
_UpperCAmelCase : List[int] = pick_layers_to_copy(lowerCAmelCase_ , lowerCAmelCase_ )
if d_layers_to_copy is None:
_UpperCAmelCase : List[int] = pick_layers_to_copy(lowerCAmelCase_ , lowerCAmelCase_ )
try:
if hasattr(
lowerCAmelCase_ , """prophetnet""" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCAmelCase_ )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCAmelCase_ )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCAmelCase_ )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCAmelCase_ )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCAmelCase_ )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCAmelCase_ )
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''' )
_UpperCAmelCase : Dict = {
"""teacher_type""": teacher.config.model_type,
"""copied_encoder_layers""": e_layers_to_copy,
"""copied_decoder_layers""": d_layers_to_copy,
}
student.save_pretrained(lowerCAmelCase_ )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
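# The script above builds a seq2seq student by copying a subset of teacher
# layers: LAYERS_TO_COPY maps (teacher depth, student depth) to the teacher
# layer indices to keep, and the copy helper moves whole nn.ModuleList entries
# via state dicts. A minimal sketch of that copying step with toy layers
# (names here are illustrative, not the script's):
from torch import nn
def copy_selected_layers(src: nn.ModuleList, dest: nn.ModuleList, layer_ids: list) -> None:
    chosen = nn.ModuleList([src[i] for i in layer_ids])
    assert len(dest) == len(chosen), f"{len(dest)} != {len(chosen)}"
    dest.load_state_dict(chosen.state_dict())  # shapes must match one-to-one
teacher_layers = nn.ModuleList(nn.Linear(8, 8) for _ in range(12))
student_layers = nn.ModuleList(nn.Linear(8, 8) for _ in range(3))
copy_selected_layers(teacher_layers, student_layers, [0, 6, 11])  # first/middle/last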
| 349 | 0 |
'''simple docstring'''
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    '''simple docstring'''
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def quick_sort_random(a: list, left: int, right: int) -> None:
    '''simple docstring'''
    if left < right:
        pivot = random.randint(left, right - 1)
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point
def main() -> None:
    '''simple docstring'''
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    quick_sort_random(unsorted, 0, len(unsorted))
    print(unsorted)
if __name__ == "__main__":
main()
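# A quick property check for the random-pivot quicksort above (an added
# sanity test, not part of the original module): sort random lists in place
# and compare against Python's built-in sorted().
for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
    expected = sorted(data)
    quick_sort_random(data, 0, len(data))
    assert data == expected, (data, expected)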
| 353 |
'''simple docstring'''
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    '''simple docstring'''
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
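# Usage sketch for the two-ended recursive search above: it checks both ends
# of the current window and recurses inward, returning the index of `key` or
# -1 when the key is absent (`right` defaults to the last index).
assert search([1, 4, 5, 11, 15], 11) == 3
assert search([1, 4, 5, 11, 15], 2) == -1
assert search([5], 5) == 0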
| 349 | 0 |
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    '''simple docstring'''
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 354 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    '''simple docstring'''
    base_url = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url="""
    video_url = requests.get(base_url + url).json()[0]["""urls"""][0]["""src"""]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("""Enter Video/IGTV url: """).strip()
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, """wb""") as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 349 | 0 |
'''simple docstring'''
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length: int, remainder: int, digits: list, length: int) -> int:
    '''simple docstring'''
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length)
        return result
    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length)
    return result
def solution(max_power: int = 9) -> int:
    '''simple docstring'''
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 355 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : List[str] = 1
_UpperCAmelCase : List[str] = 3
_UpperCAmelCase : Union[str, Any] = (32, 32)
_UpperCAmelCase : str = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(a_ )
return image
@property
def _snake_case ( self ) -> List[Any]:
torch.manual_seed(0 )
_UpperCAmelCase : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
return model
@property
def _snake_case ( self ) -> Optional[int]:
torch.manual_seed(0 )
_UpperCAmelCase : str = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
def _snake_case ( self ) -> Dict:
torch.manual_seed(0 )
_UpperCAmelCase : Any = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,)
return CLIPTextModel(a_ )
@property
def _snake_case ( self ) -> Union[str, Any]:
def extract(*a_ ,**a_ ):
class lowercase :
"""simple docstring"""
def __init__( self ) -> Any:
_UpperCAmelCase : str = torch.ones([0] )
def _snake_case ( self ,a_ ) -> Any:
self.pixel_values.to(a_ )
return self
return Out()
return extract
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Union[str, Any] = self.dummy_cond_unet
_UpperCAmelCase : int = DDIMScheduler(
beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=a_ ,set_alpha_to_one=a_ ,)
_UpperCAmelCase : Optional[int] = self.dummy_vae
_UpperCAmelCase : Optional[int] = self.dummy_text_encoder
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : int = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : Optional[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Union[str, Any] = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : Optional[int] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : str = sd_pipe([prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
_UpperCAmelCase : int = output.images
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : str = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=a_ ,)[0]
_UpperCAmelCase : str = image[0, -3:, -3:, -1]
_UpperCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Tuple = self.dummy_cond_unet
_UpperCAmelCase : Optional[int] = PNDMScheduler(skip_prk_steps=a_ )
_UpperCAmelCase : int = self.dummy_vae
_UpperCAmelCase : int = self.dummy_text_encoder
_UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : str = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : str = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : int = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : Any = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : List[Any] = sd_pipe([prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
_UpperCAmelCase : Dict = output.images
_UpperCAmelCase : List[Any] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : Any = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=a_ ,)[0]
_UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase : Union[str, Any] = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : Optional[int] = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=a_ )
assert isinstance(a_ ,a_ )
assert isinstance(pipe.scheduler ,a_ )
assert pipe.safety_checker is None
_UpperCAmelCase : Dict = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained(a_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_UpperCAmelCase : Union[str, Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def _snake_case ( self ) -> str:
_UpperCAmelCase : Optional[int] = self.dummy_cond_unet
_UpperCAmelCase : str = PNDMScheduler(skip_prk_steps=a_ )
_UpperCAmelCase : List[str] = self.dummy_vae
_UpperCAmelCase : int = self.dummy_text_encoder
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
_UpperCAmelCase : str = unet.half()
_UpperCAmelCase : List[str] = vae.half()
_UpperCAmelCase : Dict = bert.half()
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : Dict = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : List[str] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : str = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : int = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> str:
_UpperCAmelCase : List[str] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=a_ )
_UpperCAmelCase : Dict = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_UpperCAmelCase : int = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : List[Any] = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
_UpperCAmelCase : Any = 4_003_660_346
_UpperCAmelCase : List[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
_UpperCAmelCase : int = torch.manual_seed(a_ )
_UpperCAmelCase : str = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : str = output.images
_UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[str] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with safety guidance (strong configuration)
_UpperCAmelCase : List[str] = torch.manual_seed(a_ )
_UpperCAmelCase : Optional[Any] = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : List[str] = output.images
_UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[str] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> int:
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=a_ )
_UpperCAmelCase : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_UpperCAmelCase : Union[str, Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Any = """padme amidala taking a bath artwork, safe for work, no nudity"""
_UpperCAmelCase : Optional[Any] = 2_734_971_755
_UpperCAmelCase : Optional[int] = 7
_UpperCAmelCase : int = torch.manual_seed(a_ )
_UpperCAmelCase : int = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : Optional[int] = output.images
_UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
_UpperCAmelCase : Optional[int] = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
_UpperCAmelCase : Optional[int] = torch.manual_seed(a_ )
_UpperCAmelCase : int = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : Union[str, Any] = output.images
_UpperCAmelCase : Any = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
_UpperCAmelCase : List[str] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Optional[int] = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
_UpperCAmelCase : Dict = 1_044_355_234
_UpperCAmelCase : int = 12
_UpperCAmelCase : Optional[Any] = torch.manual_seed(a_ )
_UpperCAmelCase : List[str] = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : List[str] = output.images
_UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
_UpperCAmelCase : Dict = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
_UpperCAmelCase : Tuple = torch.manual_seed(a_ )
_UpperCAmelCase : Dict = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : Optional[Any] = output.images
_UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
_UpperCAmelCase : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
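# The tests above repeat one pattern: run the pipeline with a fixed seed, take
# a 3x3 corner slice of the last channel of the first image, and compare it to
# hardcoded reference values. A hedged sketch of that assertion helper:
import numpy as np
def assert_image_slice_close(image: np.ndarray, expected: np.ndarray, atol: float = 1e-2) -> None:
    # image has shape (batch, height, width, channels)
    image_slice = image[0, -3:, -3:, -1].flatten()
    assert np.abs(image_slice - expected.flatten()).max() < atol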
| 349 | 0 |
'''simple docstring'''
def binary_recursive(decimal: int) -> str:
    '''simple docstring'''
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)
def main(number: str) -> str:
    '''simple docstring'''
    number = str(number).strip()
    if not number:
        raise ValueError("""No input value was provided""")
    negative = """-""" if number.startswith("""-""") else """"""
    number = number.lstrip("""-""")
    if not number.isnumeric():
        raise ValueError("""Input value is not an integer""")
    return f'''{negative}0b{binary_recursive(int(number))}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
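# Usage sketch for the recursive converter above; results agree with Python's
# built-in bin() for non-negative inputs, and main() prepends the sign:
assert main("11") == "0b1011"
assert main("-11") == "-0b1011"
assert main("37") == bin(37)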
| 356 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A_ : str = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[int] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
A_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
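# The module above defers heavy framework imports until an attribute is first
# touched, via transformers' _LazyModule. A simplified sketch of that pattern
# (illustrative only, not the real implementation):
import importlib
import types
class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # invert {submodule: [exported names]} into {name: submodule}
        self._name_to_submodule = {
            attr: sub for sub, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr: str):
        if attr not in self._name_to_submodule:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._name_to_submodule[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the import runs only once per name
        return value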
| 349 | 0 |
'''simple docstring'''
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
A_ : int = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def snake_case_ ( lowerCAmelCase_ )-> Optional[int]:
'''simple docstring'''
config.addinivalue_line(
"""markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
config.addinivalue_line(
"""markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def snake_case_ ( lowerCAmelCase_ )-> Optional[int]:
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ )-> Optional[int]:
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
_UpperCAmelCase : Optional[Any] = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(lowerCAmelCase_ , id=lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> List[str]:
'''simple docstring'''
if exitstatus == 5:
_UpperCAmelCase : List[str] = 0
# Doctest custom flag to ignore output.
A_ : Optional[Any] = doctest.register_optionflag("""IGNORE_RESULT""")
A_ : Tuple = doctest.OutputChecker
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
def _snake_case ( self ,a_ ,a_ ,a_ ) -> List[Any]:
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self ,a_ ,a_ ,a_ )
A_ : str = CustomOutputChecker
A_ : str = HfDoctestModule
A_ : Optional[Any] = HfDocTestParser
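# De-obfuscated restatement of the doctest hook above: a registered
# IGNORE_RESULT optionflag makes the output checker accept any output, so
# doctests can show calls whose results are environment-dependent.
import doctest
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")
class CustomOutputChecker(doctest.OutputChecker):
    def check_output(self, want: str, got: str, optionflags: int) -> bool:
        if IGNORE_RESULT & optionflags:
            return True  # skip the comparison entirely for flagged examples
        return super().check_output(want, got, optionflags)
doctest.OutputChecker = CustomOutputChecker  # monkey-patch, as the conftest does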
| 357 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : Any = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = """yolos"""
def __init__( self ,a_=768 ,a_=12 ,a_=12 ,a_=3_072 ,a_="gelu" ,a_=0.0 ,a_=0.0 ,a_=0.02 ,a_=1E-1_2 ,a_=[512, 864] ,a_=16 ,a_=3 ,a_=True ,a_=100 ,a_=True ,a_=False ,a_=1 ,a_=5 ,a_=2 ,a_=5 ,a_=2 ,a_=0.1 ,**a_ ,) -> List[str]:
super().__init__(**a_ )
_UpperCAmelCase : Optional[Any] = hidden_size
_UpperCAmelCase : Optional[Any] = num_hidden_layers
_UpperCAmelCase : Tuple = num_attention_heads
_UpperCAmelCase : Optional[Any] = intermediate_size
_UpperCAmelCase : Union[str, Any] = hidden_act
_UpperCAmelCase : List[str] = hidden_dropout_prob
_UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
_UpperCAmelCase : List[Any] = initializer_range
_UpperCAmelCase : Union[str, Any] = layer_norm_eps
_UpperCAmelCase : int = image_size
_UpperCAmelCase : Dict = patch_size
_UpperCAmelCase : Tuple = num_channels
_UpperCAmelCase : Optional[Any] = qkv_bias
_UpperCAmelCase : List[Any] = num_detection_tokens
_UpperCAmelCase : Tuple = use_mid_position_embeddings
_UpperCAmelCase : int = auxiliary_loss
# Hungarian matcher
_UpperCAmelCase : Dict = class_cost
_UpperCAmelCase : Dict = bbox_cost
_UpperCAmelCase : Optional[int] = giou_cost
# Loss coefficients
_UpperCAmelCase : int = bbox_loss_coefficient
_UpperCAmelCase : Optional[Any] = giou_loss_coefficient
_UpperCAmelCase : Union[str, Any] = eos_coefficient
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = version.parse("""1.11""" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _snake_case ( self ) -> float:
return 1E-4
@property
def _snake_case ( self ) -> int:
return 12
| 349 | 0 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def snake_case_ ( lowerCAmelCase_ = "AAPL" )-> str:
'''simple docstring'''
_UpperCAmelCase : Any = F'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
_UpperCAmelCase : Tuple = BeautifulSoup(requests.get(lowerCAmelCase_ ).text , """html.parser""" )
_UpperCAmelCase : List[Any] = """My(6px) Pos(r) smartphone_Mt(6px)"""
return soup.find("""div""" , class_=class_ ).find("""span""" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 358 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    """simple docstring"""
    def test_sorted(self) -> None:
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)
    def test_negative_max_weight(self) -> None:
        self.assertRaisesRegex(ValueError, """max_weight must greater than zero.""")
    def test_negative_weight_value(self) -> None:
        self.assertRaisesRegex(ValueError, """Weight can not be negative.""")
    def test_negative_profit_value(self) -> None:
        self.assertRaisesRegex(ValueError, """Profit can not be negative.""")
    def test_null_max_weight(self) -> None:
        self.assertRaisesRegex(ValueError, """max_weight must greater than zero.""")
    def test_unequal_list_length(self) -> None:
        self.assertRaisesRegex(
            ValueError, """The length of profit and weight must be same.""")
if __name__ == "__main__":
unittest.main()
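# The tests above target a greedy fractional-knapsack calc_profit. One standard
# greedy sketch (a hedged reconstruction; the real module's internals may
# differ, but its error messages match the regexes asserted above):
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    # take items by decreasing profit/weight ratio, splitting the last item
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    limit, gain = max_weight, 0.0
    for p, w in items:
        take = min(w, limit)
        gain += p * take / w
        limit -= take
        if limit == 0:
            break
    return gain
assert calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210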
| 349 | 0 |
'''simple docstring'''
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    '''simple docstring'''
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359 |
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    '''simple docstring'''
    if num <= 0:
        msg = f'''{num}: Invalid input, please enter a positive integer.'''
        raise ValueError(msg)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
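# Quick usage check for the sieve above; primes up to sqrt(num) are collected
# in the marking loop, the remaining ones in the final scan:
assert prime_sieve(10) == [2, 3, 5, 7]
assert prime_sieve(25) == [2, 3, 5, 7, 11, 13, 17, 19, 23]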
| 349 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : Union[str, Any] = StableDiffusionKDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
_UpperCAmelCase : Optional[int] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
sd_pipe.set_scheduler("""sample_euler""" )
_UpperCAmelCase : Union[str, Any] = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : Tuple = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = sd_pipe([prompt] ,generator=a_ ,guidance_scale=9.0 ,num_inference_steps=20 ,output_type="""np""" )
_UpperCAmelCase : int = output.images
_UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase : int = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : str = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
_UpperCAmelCase : Tuple = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
sd_pipe.set_scheduler("""sample_euler""" )
_UpperCAmelCase : Optional[Any] = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : Any = torch.manual_seed(0 )
_UpperCAmelCase : List[str] = sd_pipe([prompt] ,generator=a_ ,guidance_scale=9.0 ,num_inference_steps=20 ,output_type="""np""" )
_UpperCAmelCase : Optional[Any] = output.images
_UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase : Optional[int] = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : Optional[Any] = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
_UpperCAmelCase : List[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
sd_pipe.set_scheduler("""sample_dpmpp_2m""" )
_UpperCAmelCase : int = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
_UpperCAmelCase : Any = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=7.5 ,num_inference_steps=15 ,output_type="""np""" ,use_karras_sigmas=a_ ,)
_UpperCAmelCase : int = output.images
_UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase : Dict = np.array(
[0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 360 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self ,a_ ,a_ = None ,a_ = None ,a_ = True ,a_ = None ,a_ = False ,a_ = None ,a_ = True ,a_ = "arrow" ,**a_ ,) -> str:
super().__init__(
split=a_ ,features=a_ ,cache_dir=a_ ,keep_in_memory=a_ ,streaming=a_ ,**a_ ,)
_UpperCAmelCase : Any = load_from_cache_file
_UpperCAmelCase : Optional[int] = file_format
_UpperCAmelCase : int = Spark(
df=a_ ,features=a_ ,cache_dir=a_ ,working_dir=a_ ,**a_ ,)
def _snake_case ( self ) -> int:
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
_UpperCAmelCase : str = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=a_ ,file_format=self._file_format ,)
return self.builder.as_dataset(split=self.split )
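# Hedged usage sketch for the Spark reader above: recent datasets releases
# expose it through Dataset.from_spark, given a local SparkSession:
from datasets import Dataset
from pyspark.sql import SparkSession
spark = SparkSession.builder.master("local[1]").getOrCreate()
df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
ds = Dataset.from_spark(df)  # goes through the reader class defined above
print(ds[0])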
| 349 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( _lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = LEDTokenizer
UpperCAmelCase = LEDTokenizerFast
UpperCAmelCase = True
def _snake_case ( self ) -> Any:
super().setUp()
_UpperCAmelCase : Optional[Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_UpperCAmelCase : List[str] = dict(zip(a_ ,range(len(a_ ) ) ) )
_UpperCAmelCase : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_UpperCAmelCase : Optional[int] = {"""unk_token""": """<unk>"""}
_UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
_UpperCAmelCase : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a_ ) )
def _snake_case ( self ,**a_ ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**a_ )
def _snake_case ( self ,**a_ ) -> Tuple:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**a_ )
def _snake_case ( self ,a_ ) -> int:
return "lower newer", "lower newer"
@cached_property
def _snake_case ( self ) -> str:
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def _snake_case ( self ) -> List[Any]:
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_UpperCAmelCase : Optional[Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : List[Any] = tokenizer(a_ ,max_length=len(a_ ) ,padding=a_ ,return_tensors="""pt""" )
self.assertIsInstance(a_ ,a_ )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
_UpperCAmelCase : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(a_ ,a_ )
@require_torch
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : int = tokenizer(a_ ,padding=a_ ,return_tensors="""pt""" )
self.assertIn("""input_ids""" ,a_ )
self.assertIn("""attention_mask""" ,a_ )
self.assertNotIn("""labels""" ,a_ )
self.assertNotIn("""decoder_attention_mask""" ,a_ )
@require_torch
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : Optional[int] = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : Optional[int] = tokenizer(text_target=a_ ,max_length=32 ,padding="""max_length""" ,return_tensors="""pt""" )
self.assertEqual(32 ,targets["""input_ids"""].shape[1] )
@require_torch
def _snake_case ( self ) -> Tuple:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : List[str] = tokenizer(
["""I am a small frog""" * 1_024, """I am a small frog"""] ,padding=a_ ,truncation=a_ ,return_tensors="""pt""" )
self.assertIsInstance(a_ ,a_ )
self.assertEqual(batch.input_ids.shape ,(2, 5_122) )
@require_torch
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : Optional[int] = ["""A long paragraph for summarization."""]
_UpperCAmelCase : str = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : Any = tokenizer(a_ ,return_tensors="""pt""" )
_UpperCAmelCase : Any = tokenizer(text_target=a_ ,return_tensors="""pt""" )
_UpperCAmelCase : List[str] = inputs["""input_ids"""]
_UpperCAmelCase : int = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def _snake_case ( self ) -> List[Any]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : List[Any] = ["""Summary of the text.""", """Another summary."""]
_UpperCAmelCase : int = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_UpperCAmelCase : Any = tokenizer(a_ ,padding=a_ )
_UpperCAmelCase : Any = [[0] * len(a_ ) for x in encoded_output["""input_ids"""]]
_UpperCAmelCase : Optional[int] = tokenizer.pad(a_ )
self.assertSequenceEqual(outputs["""global_attention_mask"""] ,a_ )
def _snake_case ( self ) -> List[str]:
pass
def _snake_case ( self ) -> Optional[int]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(a_ ,**a_ )
_UpperCAmelCase : str = self.tokenizer_class.from_pretrained(a_ ,**a_ )
_UpperCAmelCase : int = """A, <mask> AllenNLP sentence."""
_UpperCAmelCase : str = tokenizer_r.encode_plus(a_ ,add_special_tokens=a_ ,return_token_type_ids=a_ )
_UpperCAmelCase : List[Any] = tokenizer_p.encode_plus(a_ ,add_special_tokens=a_ ,return_token_type_ids=a_ )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) ,sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) ,sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) ,)
_UpperCAmelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
_UpperCAmelCase : Optional[int] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] ,[0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] ,[0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
a_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
a_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 361 |
'''simple docstring'''
A_ : Optional[Any] = """0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 349 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def snake_case_ ( lowerCAmelCase_ )-> Optional[Any]:
'''simple docstring'''
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
@staticmethod
def _snake_case ( a_ ) -> Union[str, Any]:
_UpperCAmelCase : Dict = parser.add_parser("""download""" )
download_parser.add_argument(
"""--cache-dir""" ,type=a_ ,default=a_ ,help="""Path to location to store the models""" )
download_parser.add_argument(
"""--force""" ,action="""store_true""" ,help="""Force the model to be download even if already in cache-dir""" )
download_parser.add_argument(
"""--trust-remote-code""" ,action="""store_true""" ,help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" ,)
download_parser.add_argument("""model""" ,type=a_ ,help="""Name of the model to download""" )
download_parser.set_defaults(func=a_ )
def __init__( self ,a_ ,a_ ,a_ ,a_ ) -> List[Any]:
_UpperCAmelCase : Optional[Any] = model
_UpperCAmelCase : Tuple = cache
_UpperCAmelCase : Dict = force
_UpperCAmelCase : Dict = trust_remote_code
def _snake_case ( self ) -> Union[str, Any]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
| 362 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def snake_case_ ( )-> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
_UpperCAmelCase : str = parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(lowerCAmelCase_ )
# Let's go
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
if not hasattr(lowerCAmelCase_ , """func""" ):
parser.print_help()
exit(1 )
# Run
_UpperCAmelCase : Optional[int] = args.func(lowerCAmelCase_ )
service.run()
if __name__ == "__main__":
main()
| 349 | 0 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> int:
'''simple docstring'''
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_UpperCAmelCase : Optional[int] = flax_key_tuple[:-1] + ("""weight""",)
_UpperCAmelCase : Tuple = torch.permute(lowerCAmelCase_ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(lowerCAmelCase_ ):
# linear layer
_UpperCAmelCase : Optional[int] = flax_key_tuple[:-1] + ("""weight""",)
_UpperCAmelCase : Tuple = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_UpperCAmelCase : List[Any] = flax_key_tuple[:-1] + ("""weight""",)
return flax_key_tuple, flax_tensor
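# Illustrative example (assumption: standard Flax/PyTorch layouts): a 3-D expert
# kernel keyed ("mlp", "wi", "kernel") with shape (num_experts, d_in, d_out) is
# renamed to ("mlp", "wi", "weight") and permuted to (num_experts, d_out, d_in),
# matching the transposed weight convention of torch.nn.Linear.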
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> int:
'''simple docstring'''
if "metadata" in layer:
_UpperCAmelCase : List[str] = layer.split("""metadata""" )
_UpperCAmelCase : int = """""".join(split_layer[0] )[:-1]
_UpperCAmelCase : Any = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
elif "kvstore" in layer:
_UpperCAmelCase : Tuple = layer.split("""kvstore""" )
_UpperCAmelCase : List[str] = """""".join(split_layer[0] )[:-1]
_UpperCAmelCase : Dict = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
else:
_UpperCAmelCase : str = layer.split("""/""" )
_UpperCAmelCase : Optional[int] = """/""".join(split_layer[:-1] )
_UpperCAmelCase : int = (split_layer[-1],)
if "kvstore/path" in layer:
_UpperCAmelCase : Union[str, Any] = F'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
elif "kvstore/driver" in layer:
_UpperCAmelCase : Any = """file"""
else:
_UpperCAmelCase : Optional[int] = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> Tuple:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = rename_keys(lowerCAmelCase_ )
_UpperCAmelCase : Tuple = {}
for k, v in current_block.items():
_UpperCAmelCase : List[str] = v
_UpperCAmelCase : Optional[int] = new_current_block
torch.save(lowerCAmelCase_ , lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = WEIGHTS_NAME )-> Any:
'''simple docstring'''
_UpperCAmelCase : List[str] = convert_file_size_to_int(lowerCAmelCase_ )
_UpperCAmelCase : Optional[Any] = []
_UpperCAmelCase : str = {}
_UpperCAmelCase : Optional[int] = 0
_UpperCAmelCase : List[Any] = 0
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
_UpperCAmelCase : List[str] = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
_UpperCAmelCase : Dict = flatten_dict(lowerCAmelCase_ , sep="""/""" )
_UpperCAmelCase : List[str] = {}
for layer in checkpoint_info.keys():
_UpperCAmelCase : int = get_key_and_tensorstore_dict(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if curr_real_layer_name in all_layers:
_UpperCAmelCase : str = content
else:
_UpperCAmelCase : Any = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_UpperCAmelCase : str = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_UpperCAmelCase : List[str] = torch.tensor(lowerCAmelCase_ )
_UpperCAmelCase : Union[str, Any] = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_UpperCAmelCase : List[Any] = rename_base_flax_keys(tuple(key.split("""/""" ) ) , lowerCAmelCase_ )
_UpperCAmelCase : Optional[int] = """/""".join(lowerCAmelCase_ )
        # If this weight would tip the current shard over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_UpperCAmelCase : List[str] = os.path.join(
lowerCAmelCase_ , weights_name.replace(""".bin""" , F'''-{len(lowerCAmelCase_ )+1:05d}-of-???.bin''' ) )
rename_and_save_block(lowerCAmelCase_ , lowerCAmelCase_ )
sharded_state_dicts.append(current_block.keys() )
del current_block
_UpperCAmelCase : List[Any] = {}
_UpperCAmelCase : Dict = 0
_UpperCAmelCase : Tuple = raw_weights.to(getattr(lowerCAmelCase_ , lowerCAmelCase_ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_UpperCAmelCase : Tuple = os.path.join(lowerCAmelCase_ , weights_name.replace(""".bin""" , F'''-{len(lowerCAmelCase_ )+1:05d}-of-???.bin''' ) )
rename_and_save_block(lowerCAmelCase_ , lowerCAmelCase_ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(lowerCAmelCase_ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_UpperCAmelCase : List[str] = {}
_UpperCAmelCase : str = {}
for idx, shard in enumerate(lowerCAmelCase_ ):
        _UpperCAmelCase : Optional[int] = weights_name.replace(
            """.bin""" , F'''-{idx+1:05d}-of-{len(lowerCAmelCase_ ):05d}.bin''' )
_UpperCAmelCase : Optional[int] = os.path.join(lowerCAmelCase_ , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) )
_UpperCAmelCase : Optional[int] = shard
for key in shard:
_UpperCAmelCase : List[Any] = shard_file
# Add the metadata
_UpperCAmelCase : str = {"""total_size""": total_size}
_UpperCAmelCase : Tuple = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , """w""" , encoding="""utf-8""" ) as f:
_UpperCAmelCase : Tuple = json.dumps(lowerCAmelCase_ , indent=2 , sort_keys=lowerCAmelCase_ ) + """\n"""
f.write(lowerCAmelCase_ )
return metadata, index
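# Illustrative note: tensors are streamed out of the tensorstore checkpoint one at
# a time, so a new shard file is started as soon as adding the next tensor would
# push the running shard past max_shard_size; the returned index maps every
# parameter name to the shard file that holds it, mirroring the usual
# pytorch_model.bin.index.json layout.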
if __name__ == "__main__":
A_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
A_ : str = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def snake_case_ ( )-> Tuple:
'''simple docstring'''
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
_UpperCAmelCase : Tuple = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
_UpperCAmelCase : int = SwitchTransformersForConditionalGeneration.from_pretrained(
"""/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
    _UpperCAmelCase : Dict = T5Tokenizer.from_pretrained("""t5-small""" )
_UpperCAmelCase : int = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
_UpperCAmelCase : List[str] = tokenizer(lowerCAmelCase_ , return_tensors="""pt""" ).input_ids
_UpperCAmelCase : Any = model.generate(lowerCAmelCase_ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 363 |
'''simple docstring'''
import math
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> int:
'''simple docstring'''
_UpperCAmelCase : str = len(lowerCAmelCase_ )
_UpperCAmelCase : List[str] = int(math.floor(math.sqrt(lowerCAmelCase_ ) ) )
_UpperCAmelCase : int = 0
while arr[min(lowerCAmelCase_ , lowerCAmelCase_ ) - 1] < x:
_UpperCAmelCase : Optional[int] = step
step += int(math.floor(math.sqrt(lowerCAmelCase_ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
_UpperCAmelCase : List[Any] = prev + 1
if prev == min(lowerCAmelCase_ , lowerCAmelCase_ ):
return -1
if arr[prev] == x:
return prev
return -1
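# Illustrative note (assuming the standard jump-search algorithm): the array must
# be sorted; probing every floor(sqrt(n))-th element and then scanning linearly
# inside one block gives O(sqrt(n)) comparisons. For example, searching 5 in
# [0, 1, 3, 5, 7, 9] uses step 2 and returns index 3.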
if __name__ == "__main__":
A_ : str = input("""Enter numbers separated by a comma:\n""").strip()
A_ : Union[str, Any] = [int(item) for item in user_input.split(""",""")]
A_ : int = int(input("""Enter the number to be searched:\n"""))
A_ : Any = jump_search(arr, x)
if res == -1:
print("""Number not found!""")
else:
print(f"""Number {x} is at index {res}""")
| 349 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : int = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = """trocr"""
UpperCAmelCase = ["""past_key_values"""]
UpperCAmelCase = {
"""num_attention_heads""": """decoder_attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """decoder_layers""",
}
def __init__( self ,a_=50_265 ,a_=1_024 ,a_=12 ,a_=16 ,a_=4_096 ,a_="gelu" ,a_=512 ,a_=0.1 ,a_=0.0 ,a_=0.0 ,a_=2 ,a_=0.02 ,a_=0.0 ,a_=True ,a_=False ,a_=True ,a_=True ,a_=1 ,a_=0 ,a_=2 ,**a_ ,) -> str:
_UpperCAmelCase : Tuple = vocab_size
_UpperCAmelCase : str = d_model
_UpperCAmelCase : str = decoder_layers
_UpperCAmelCase : str = decoder_attention_heads
_UpperCAmelCase : List[str] = decoder_ffn_dim
_UpperCAmelCase : int = activation_function
_UpperCAmelCase : List[Any] = max_position_embeddings
_UpperCAmelCase : Optional[Any] = dropout
_UpperCAmelCase : Tuple = attention_dropout
_UpperCAmelCase : List[str] = activation_dropout
_UpperCAmelCase : Optional[Any] = init_std
_UpperCAmelCase : List[Any] = decoder_layerdrop
_UpperCAmelCase : Optional[Any] = use_cache
_UpperCAmelCase : Union[str, Any] = scale_embedding
_UpperCAmelCase : str = use_learned_position_embeddings
_UpperCAmelCase : Optional[int] = layernorm_embedding
super().__init__(
pad_token_id=a_ ,bos_token_id=a_ ,eos_token_id=a_ ,decoder_start_token_id=a_ ,**a_ ,)
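# Illustrative note: TrOCR's text side is a decoder-only transformer, so the
# attribute_map above aliases the generic names (hidden_size, num_hidden_layers,
# num_attention_heads) onto the decoder-specific fields of this config.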
| 364 |
'''simple docstring'''
import argparse
import copy
def snake_case_ ( lowerCAmelCase_ )-> Dict:
'''simple docstring'''
_UpperCAmelCase : Dict = {}
with open(lowerCAmelCase_ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
_UpperCAmelCase : Optional[int] = []
_list.append([line.split()[1], line.split()[2]] )
_UpperCAmelCase : List[str] = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
_UpperCAmelCase : List[str] = []
_list.append([line.split()[0], line.split()[2]] )
_UpperCAmelCase : Optional[int] = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> List[Any]:
'''simple docstring'''
with open(lowerCAmelCase_ ) as f:
_UpperCAmelCase : List[Any] = f.read(1 )
_UpperCAmelCase : int = start_node
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : Dict = start_node
_UpperCAmelCase : Any = 0
while visiting not in first_solution:
_UpperCAmelCase : Optional[int] = 10000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(lowerCAmelCase_ ) and k[0] not in first_solution:
_UpperCAmelCase : Optional[int] = k[1]
_UpperCAmelCase : List[str] = k[0]
first_solution.append(lowerCAmelCase_ )
_UpperCAmelCase : Optional[int] = distance_of_first_solution + int(lowerCAmelCase_ )
_UpperCAmelCase : Dict = best_node
first_solution.append(lowerCAmelCase_ )
_UpperCAmelCase : List[str] = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
_UpperCAmelCase : int = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10000
)
return first_solution, distance_of_first_solution
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> int:
'''simple docstring'''
_UpperCAmelCase : int = []
for n in solution[1:-1]:
_UpperCAmelCase : Tuple = solution.index(lowerCAmelCase_ )
for kn in solution[1:-1]:
_UpperCAmelCase : int = solution.index(lowerCAmelCase_ )
if n == kn:
continue
_UpperCAmelCase : Tuple = copy.deepcopy(lowerCAmelCase_ )
_UpperCAmelCase : Union[str, Any] = kn
_UpperCAmelCase : List[str] = n
_UpperCAmelCase : Optional[int] = 0
for k in _tmp[:-1]:
_UpperCAmelCase : List[str] = _tmp[_tmp.index(lowerCAmelCase_ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
_UpperCAmelCase : Dict = distance + int(i[1] )
_tmp.append(lowerCAmelCase_ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
_UpperCAmelCase : Dict = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda lowerCAmelCase_ : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> int:
'''simple docstring'''
_UpperCAmelCase : List[Any] = 1
_UpperCAmelCase : Optional[Any] = first_solution
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : List[Any] = distance_of_first_solution
_UpperCAmelCase : Dict = solution
while count <= iters:
_UpperCAmelCase : Any = find_neighborhood(lowerCAmelCase_ , lowerCAmelCase_ )
_UpperCAmelCase : Dict = 0
_UpperCAmelCase : Optional[Any] = neighborhood[index_of_best_solution]
_UpperCAmelCase : Optional[Any] = len(lowerCAmelCase_ ) - 1
_UpperCAmelCase : Optional[Any] = False
while not found:
_UpperCAmelCase : Tuple = 0
while i < len(lowerCAmelCase_ ):
if best_solution[i] != solution[i]:
_UpperCAmelCase : Any = best_solution[i]
_UpperCAmelCase : str = solution[i]
break
_UpperCAmelCase : int = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : List[Any] = best_solution[:-1]
_UpperCAmelCase : str = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
_UpperCAmelCase : Tuple = cost
_UpperCAmelCase : List[Any] = solution
else:
_UpperCAmelCase : Any = index_of_best_solution + 1
_UpperCAmelCase : Dict = neighborhood[index_of_best_solution]
if len(lowerCAmelCase_ ) >= size:
tabu_list.pop(0 )
_UpperCAmelCase : Optional[Any] = count + 1
return best_solution_ever, best_cost
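# Illustrative note: each tabu-search iteration moves to the best two-node-swap
# neighbor, records the swapped pair in the tabu list so the move cannot be
# immediately undone, and evicts the oldest entry once the list grows past `size`.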
def snake_case_ ( lowerCAmelCase_=None )-> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Tuple = generate_neighbours(args.File )
_UpperCAmelCase ,_UpperCAmelCase : Tuple = generate_first_solution(
args.File , lowerCAmelCase_ )
_UpperCAmelCase ,_UpperCAmelCase : str = tabu_search(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , args.Iterations , args.Size , )
print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
A_ : Optional[int] = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 349 | 0 |
import math
import qiskit
def snake_case_ ( lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 )-> qiskit.result.counts.Counts:
'''simple docstring'''
if (
        isinstance(lowerCAmelCase_ , str )
        or isinstance(lowerCAmelCase_ , str )
        or isinstance(lowerCAmelCase_ , str )
):
raise TypeError("""inputs must be integers.""" )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError("""inputs must be positive.""" )
if (
(math.floor(lowerCAmelCase_ ) != input_a)
or (math.floor(lowerCAmelCase_ ) != input_a)
or (math.floor(lowerCAmelCase_ ) != carry_in)
):
raise ValueError("""inputs must be exact integers.""" )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError("""inputs must be less or equal to 2.""" )
# build registers
_UpperCAmelCase : List[str] = qiskit.QuantumRegister(4 , """qr""" )
_UpperCAmelCase : str = qiskit.ClassicalRegister(2 , """cr""" )
# list the entries
_UpperCAmelCase : Optional[int] = [input_a, input_a, carry_in]
_UpperCAmelCase : Any = qiskit.QuantumCircuit(lowerCAmelCase_ , lowerCAmelCase_ )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(lowerCAmelCase_ ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(lowerCAmelCase_ ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(lowerCAmelCase_ ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , lowerCAmelCase_ ) # measure the last two qbits
_UpperCAmelCase : List[str] = qiskit.Aer.get_backend("""aer_simulator""" )
_UpperCAmelCase : List[Any] = qiskit.execute(lowerCAmelCase_ , lowerCAmelCase_ , shots=1000 )
return job.result().get_counts(lowerCAmelCase_ )
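# Illustrative note: inputs of 1 are encoded with an X gate, 0 with the identity,
# and 2 with a Hadamard (superposition). With the default inputs (1, 1, 1) the
# adder computes 1 + 1 + 1 = 3, i.e. sum bit 1 and carry-out 1, so all 1000 shots
# should report the state '11'.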
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 365 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class lowercase :
"""simple docstring"""
UpperCAmelCase = 42
UpperCAmelCase = 42
class lowercase :
"""simple docstring"""
def __init__( self ,a_ ) -> List[str]:
_UpperCAmelCase : list[list[Edge]] = [[] for _ in range(a_ )]
_UpperCAmelCase : int = size
def __getitem__( self ,a_ ) -> Iterator[Edge]:
return iter(self._graph[vertex] )
@property
def _snake_case ( self ) -> List[Any]:
return self._size
def _snake_case ( self ,a_ ,a_ ,a_ ) -> Tuple:
if weight not in (0, 1):
raise ValueError("""Edge weight must be either 0 or 1.""" )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("""Vertex indexes must be in [0; size).""" )
self._graph[from_vertex].append(Edge(a_ ,a_ ) )
def _snake_case ( self ,a_ ,a_ ) -> int | None:
_UpperCAmelCase : Union[str, Any] = deque([start_vertex] )
_UpperCAmelCase : list[int | None] = [None] * self.size
_UpperCAmelCase : Union[str, Any] = 0
while queue:
_UpperCAmelCase : Union[str, Any] = queue.popleft()
_UpperCAmelCase : Union[str, Any] = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
_UpperCAmelCase : List[Any] = current_distance + edge.weight
_UpperCAmelCase : List[Any] = distances[edge.destination_vertex]
if (
isinstance(a_ ,a_ )
and new_distance >= dest_vertex_distance
):
continue
_UpperCAmelCase : Tuple = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("""No path from start_vertex to finish_vertex.""" )
return distances[finish_vertex]
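# Illustrative note: this is the classic 0-1 BFS trick -- zero-weight edges are
# pushed to the front of the deque and one-weight edges to the back, so vertices
# leave the queue in nondecreasing distance order and shortest paths are found in
# O(V + E) time without a priority queue.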
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 | 0 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self ,a_ ,a_ = None ,a_ = None ,a_ = True ,a_ = None ,a_ = False ,a_ = None ,a_ = True ,a_ = "arrow" ,**a_ ,) -> str:
super().__init__(
split=a_ ,features=a_ ,cache_dir=a_ ,keep_in_memory=a_ ,streaming=a_ ,**a_ ,)
_UpperCAmelCase : Any = load_from_cache_file
_UpperCAmelCase : Optional[int] = file_format
_UpperCAmelCase : int = Spark(
df=a_ ,features=a_ ,cache_dir=a_ ,working_dir=a_ ,**a_ ,)
def _snake_case ( self ) -> int:
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
_UpperCAmelCase : str = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=a_ ,file_format=self._file_format ,)
return self.builder.as_dataset(split=self.split )
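# Illustrative note: with streaming=True the Spark DataFrame is exposed as an
# iterable streaming dataset; otherwise it is first materialized into the cache
# directory in the chosen file_format ("arrow" by default, forcing a rebuild via
# FORCE_REDOWNLOAD when the cache should be ignored) and then loaded from disk.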
| 366 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A_ : Any = 1_6
A_ : Union[str, Any] = 3_2
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 16 )-> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_UpperCAmelCase : str = DatasetDict(
{
"""train""": dataset["""train"""].select(lowerCAmelCase_ ),
"""validation""": dataset["""train"""].select(lowerCAmelCase_ ),
"""test""": dataset["""validation"""],
} )
def tokenize_function(lowerCAmelCase_ ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_UpperCAmelCase : Optional[int] = datasets.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCAmelCase : List[Any] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowerCAmelCase_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_UpperCAmelCase : Tuple = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_UpperCAmelCase : List[str] = 16
elif accelerator.mixed_precision != "no":
_UpperCAmelCase : Any = 8
else:
_UpperCAmelCase : Dict = None
return tokenizer.pad(
lowerCAmelCase_ , padding="""longest""" , max_length=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_tensors="""pt""" , )
# Instantiate dataloaders.
_UpperCAmelCase : Union[str, Any] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
_UpperCAmelCase : Union[str, Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
_UpperCAmelCase : Dict = DataLoader(
tokenized_datasets["""test"""] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
return train_dataloader, eval_dataloader, test_dataloader
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = []
# Download the dataset
_UpperCAmelCase : Dict = load_dataset("""glue""" , """mrpc""" )
# Create our splits
_UpperCAmelCase : Optional[Any] = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
_UpperCAmelCase : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase : Dict = config["""lr"""]
_UpperCAmelCase : List[Any] = int(config["""num_epochs"""] )
_UpperCAmelCase : str = int(config["""seed"""] )
_UpperCAmelCase : List[Any] = int(config["""batch_size"""] )
_UpperCAmelCase : int = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
_UpperCAmelCase : List[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_UpperCAmelCase : Dict = batch_size // MAX_GPU_BATCH_SIZE
_UpperCAmelCase : Tuple = MAX_GPU_BATCH_SIZE
set_seed(lowerCAmelCase_ )
# New Code #
# Create our folds:
_UpperCAmelCase : Any = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] )
_UpperCAmelCase : Tuple = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(lowerCAmelCase_ ):
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Union[str, Any] = get_fold_dataloaders(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCAmelCase : Tuple = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCAmelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_UpperCAmelCase : List[Any] = model.to(accelerator.device )
# Instantiate optimizer
_UpperCAmelCase : int = AdamW(params=model.parameters() , lr=lowerCAmelCase_ )
# Instantiate scheduler
_UpperCAmelCase : Dict = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase_ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Any = accelerator.prepare(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Now we train the model
for epoch in range(lowerCAmelCase_ ):
model.train()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_UpperCAmelCase : Union[str, Any] = model(**lowerCAmelCase_ )
_UpperCAmelCase : Dict = outputs.loss
_UpperCAmelCase : int = loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCAmelCase : List[str] = model(**lowerCAmelCase_ )
_UpperCAmelCase : List[Any] = outputs.logits.argmax(dim=-1 )
_UpperCAmelCase ,_UpperCAmelCase : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowerCAmelCase_ , references=lowerCAmelCase_ , )
_UpperCAmelCase : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , lowerCAmelCase_ )
# New Code #
# We also run predictions on the test set at the very end
_UpperCAmelCase : Tuple = []
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**lowerCAmelCase_ )
_UpperCAmelCase : Any = outputs.logits
_UpperCAmelCase ,_UpperCAmelCase : List[Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(lowerCAmelCase_ , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
_UpperCAmelCase : List[Any] = torch.cat(lowerCAmelCase_ , dim=0 )
_UpperCAmelCase : Union[str, Any] = torch.stack(lowerCAmelCase_ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
_UpperCAmelCase : List[str] = metric.compute(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )
accelerator.print("""Average test metrics from all folds:""" , lowerCAmelCase_ )
def snake_case_ ( )-> Any:
'''simple docstring'''
_UpperCAmelCase : List[str] = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowerCAmelCase_ , default=lowerCAmelCase_ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
# New Code #
parser.add_argument("""--num_folds""" , type=lowerCAmelCase_ , default=3 , help="""The number of splits to perform across the dataset""" )
_UpperCAmelCase : Optional[int] = parser.parse_args()
_UpperCAmelCase : Tuple = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowerCAmelCase_ , lowerCAmelCase_ )
if __name__ == "__main__":
main()
| 349 | 0 |
'''simple docstring'''
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False )-> Optional[Any]:
'''simple docstring'''
    if isinstance(lowerCAmelCase_ , set ) and isinstance(lowerCAmelCase_ , set ):
_UpperCAmelCase : Tuple = len(set_a.intersection(lowerCAmelCase_ ) )
if alternative_union:
_UpperCAmelCase : Optional[int] = len(lowerCAmelCase_ ) + len(lowerCAmelCase_ )
else:
_UpperCAmelCase : Union[str, Any] = len(set_a.union(lowerCAmelCase_ ) )
return intersection / union
if isinstance(lowerCAmelCase_ , (list, tuple) ) and isinstance(lowerCAmelCase_ , (list, tuple) ):
_UpperCAmelCase : Any = [element for element in set_a if element in set_b]
if alternative_union:
_UpperCAmelCase : List[Any] = len(lowerCAmelCase_ ) + len(lowerCAmelCase_ )
return len(lowerCAmelCase_ ) / union
else:
_UpperCAmelCase : str = set_a + [element for element in set_b if element not in set_a]
return len(lowerCAmelCase_ ) / len(lowerCAmelCase_ )
return len(lowerCAmelCase_ ) / len(lowerCAmelCase_ )
return None
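# Illustrative worked example (values from the demo below): for
# set_a = {"a", "b", "c", "d", "e"} and set_b = {"c", "d", "e", "f", "h", "i"} the
# intersection has 3 elements and the union 8, so the printed similarity is
# 3 / 8 = 0.375; with alternative_union=True the denominator would instead be
# len(set_a) + len(set_b) = 11, giving 3 / 11.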
if __name__ == "__main__":
A_ : List[str] = {"""a""", """b""", """c""", """d""", """e"""}
A_ : Tuple = {"""c""", """d""", """e""", """f""", """h""", """i"""}
print(jaccard_similarity(set_a, set_b))
| 367 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
A_ : Dict = logging.getLogger(__name__)
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = """sequence-classification"""
def __init__( self ,a_ ) -> Dict:
if type(a_ ) == dict:
_UpperCAmelCase : Tuple = Namespace(**a_ )
_UpperCAmelCase : Optional[int] = glue_output_modes[hparams.task]
_UpperCAmelCase : Union[str, Any] = glue_tasks_num_labels[hparams.task]
super().__init__(a_ ,a_ ,self.mode )
def _snake_case ( self ,**a_ ) -> Optional[Any]:
return self.model(**a_ )
def _snake_case ( self ,a_ ,a_ ) -> Optional[Any]:
_UpperCAmelCase : Optional[Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_UpperCAmelCase : Any = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
_UpperCAmelCase : Any = self(**a_ )
_UpperCAmelCase : int = outputs[0]
_UpperCAmelCase : Any = self.trainer.lr_schedulers[0]["""scheduler"""]
_UpperCAmelCase : Any = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _snake_case ( self ) -> int:
_UpperCAmelCase : Optional[int] = self.hparams
_UpperCAmelCase : int = processors[args.task]()
_UpperCAmelCase : str = processor.get_labels()
for mode in ["train", "dev"]:
_UpperCAmelCase : Tuple = self._feature_file(a_ )
if os.path.exists(a_ ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" ,a_ )
else:
logger.info("""Creating features from dataset file at %s""" ,args.data_dir )
_UpperCAmelCase : List[Any] = (
processor.get_dev_examples(args.data_dir )
if mode == """dev"""
else processor.get_train_examples(args.data_dir )
)
_UpperCAmelCase : Union[str, Any] = convert_examples_to_features(
a_ ,self.tokenizer ,max_length=args.max_seq_length ,label_list=self.labels ,output_mode=args.glue_output_mode ,)
logger.info("""Saving features into cached file %s""" ,a_ )
torch.save(a_ ,a_ )
def _snake_case ( self ,a_ ,a_ ,a_ = False ) -> DataLoader:
_UpperCAmelCase : Union[str, Any] = """dev""" if mode == """test""" else mode
_UpperCAmelCase : Tuple = self._feature_file(a_ )
logger.info("""Loading features from cached file %s""" ,a_ )
_UpperCAmelCase : Union[str, Any] = torch.load(a_ )
_UpperCAmelCase : List[str] = torch.tensor([f.input_ids for f in features] ,dtype=torch.long )
_UpperCAmelCase : Tuple = torch.tensor([f.attention_mask for f in features] ,dtype=torch.long )
_UpperCAmelCase : str = torch.tensor([f.token_type_ids for f in features] ,dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_UpperCAmelCase : Optional[int] = torch.tensor([f.label for f in features] ,dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_UpperCAmelCase : str = torch.tensor([f.label for f in features] ,dtype=torch.float )
return DataLoader(
TensorDataset(a_ ,a_ ,a_ ,a_ ) ,batch_size=a_ ,shuffle=a_ ,)
def _snake_case ( self ,a_ ,a_ ) -> Any:
_UpperCAmelCase : Any = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_UpperCAmelCase : int = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
_UpperCAmelCase : List[str] = self(**a_ )
_UpperCAmelCase ,_UpperCAmelCase : Optional[int] = outputs[:2]
_UpperCAmelCase : List[str] = logits.detach().cpu().numpy()
_UpperCAmelCase : Union[str, Any] = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _snake_case ( self ,a_ ) -> tuple:
_UpperCAmelCase : Optional[int] = torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item()
_UpperCAmelCase : Any = np.concatenate([x["""pred"""] for x in outputs] ,axis=0 )
if self.hparams.glue_output_mode == "classification":
_UpperCAmelCase : int = np.argmax(a_ ,axis=1 )
elif self.hparams.glue_output_mode == "regression":
_UpperCAmelCase : Union[str, Any] = np.squeeze(a_ )
_UpperCAmelCase : str = np.concatenate([x["""target"""] for x in outputs] ,axis=0 )
_UpperCAmelCase : Tuple = [[] for _ in range(out_label_ids.shape[0] )]
_UpperCAmelCase : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
_UpperCAmelCase : Optional[int] = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task ,a_ ,a_ )}
_UpperCAmelCase : Dict = dict(results.items() )
_UpperCAmelCase : Any = results
return ret, preds_list, out_label_list
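    # Illustrative note: `_eval_end` aggregates the per-batch outputs collected in
    # the validation/test steps above -- averaging the loss, concatenating
    # predictions and labels, then delegating metric computation to
    # `glue_compute_metrics` for the configured task.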
def _snake_case ( self ,a_ ) -> dict:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Dict = self._eval_end(a_ )
_UpperCAmelCase : List[Any] = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _snake_case ( self ,a_ ) -> dict:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : str = self._eval_end(a_ )
_UpperCAmelCase : List[Any] = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _snake_case ( a_ ,a_ ) -> Any:
BaseTransformer.add_model_specific_args(a_ ,a_ )
parser.add_argument(
"""--max_seq_length""" ,default=128 ,type=a_ ,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) ,)
parser.add_argument(
"""--task""" ,default="""""" ,type=a_ ,required=a_ ,help="""The GLUE task to run""" ,)
parser.add_argument(
"""--gpus""" ,default=0 ,type=a_ ,help="""The number of GPUs allocated for this, it is by default 0 meaning none""" ,)
parser.add_argument(
"""--overwrite_cache""" ,action="""store_true""" ,help="""Overwrite the cached training and evaluation sets""" )
return parser
def snake_case_ ( )-> Tuple:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
add_generic_args(lowerCAmelCase_ , os.getcwd() )
_UpperCAmelCase : Optional[int] = GLUETransformer.add_model_specific_args(lowerCAmelCase_ , os.getcwd() )
_UpperCAmelCase : Optional[int] = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_UpperCAmelCase : Optional[int] = os.path.join(
"""./results""" , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
_UpperCAmelCase : int = GLUETransformer(lowerCAmelCase_ )
_UpperCAmelCase : Any = generic_train(lowerCAmelCase_ , lowerCAmelCase_ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_UpperCAmelCase : int = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=lowerCAmelCase_ ) )
_UpperCAmelCase : int = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(lowerCAmelCase_ )
if __name__ == "__main__":
main()
| 349 | 0 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = (DDPMParallelScheduler,)
def _snake_case ( self ,**a_ ) -> Dict:
_UpperCAmelCase : List[Any] = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**a_ )
return config
def _snake_case ( self ) -> str:
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=a_ )
def _snake_case ( self ) -> List[str]:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] ,[0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=a_ ,beta_end=a_ )
def _snake_case ( self ) -> List[str]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a_ )
def _snake_case ( self ) -> str:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=a_ )
def _snake_case ( self ) -> str:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=a_ )
def _snake_case ( self ) -> Any:
self.check_over_configs(thresholding=a_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=a_ ,prediction_type=a_ ,sample_max_value=a_ ,)
def _snake_case ( self ) -> Union[str, Any]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=a_ )
def _snake_case ( self ) -> Optional[Any]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=a_ )
def _snake_case ( self ) -> Any:
_UpperCAmelCase : List[str] = self.scheduler_classes[0]
_UpperCAmelCase : Tuple = self.get_scheduler_config()
_UpperCAmelCase : List[Any] = scheduler_class(**a_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = self.scheduler_classes[0]
_UpperCAmelCase : List[Any] = self.get_scheduler_config()
_UpperCAmelCase : Optional[Any] = scheduler_class(**a_ )
_UpperCAmelCase : int = len(a_ )
_UpperCAmelCase : List[str] = self.dummy_model()
_UpperCAmelCase : List[Any] = self.dummy_sample_deter
_UpperCAmelCase : List[str] = self.dummy_sample_deter + 0.1
_UpperCAmelCase : Optional[Any] = self.dummy_sample_deter - 0.1
_UpperCAmelCase : Union[str, Any] = samplea.shape[0]
_UpperCAmelCase : Dict = torch.stack([samplea, samplea, samplea] ,dim=0 )
_UpperCAmelCase : List[str] = torch.arange(a_ )[0:3, None].repeat(1 ,a_ )
_UpperCAmelCase : Any = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) )
_UpperCAmelCase : List[Any] = scheduler.batch_step_no_noise(a_ ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) )
_UpperCAmelCase : str = torch.sum(torch.abs(a_ ) )
_UpperCAmelCase : List[Any] = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 1153.1833 ) < 1E-2
assert abs(result_mean.item() - 0.5005 ) < 1E-3
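    # Illustrative note: `batch_step_no_noise` denoises three perturbed copies of
    # the same sample in a single call; the assertions pin the summed and mean
    # absolute outputs to reference values so regressions in the parallel
    # (batched) stepping path are caught.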
def _snake_case ( self ) -> Any:
_UpperCAmelCase : List[str] = self.scheduler_classes[0]
_UpperCAmelCase : int = self.get_scheduler_config()
_UpperCAmelCase : int = scheduler_class(**a_ )
_UpperCAmelCase : int = len(a_ )
_UpperCAmelCase : List[str] = self.dummy_model()
_UpperCAmelCase : Any = self.dummy_sample_deter
_UpperCAmelCase : Tuple = torch.manual_seed(0 )
for t in reversed(range(a_ ) ):
# 1. predict noise residual
_UpperCAmelCase : Dict = model(a_ ,a_ )
# 2. predict previous mean of sample x_t-1
_UpperCAmelCase : Optional[Any] = scheduler.step(a_ ,a_ ,a_ ,generator=a_ ).prev_sample
_UpperCAmelCase : int = pred_prev_sample
_UpperCAmelCase : Union[str, Any] = torch.sum(torch.abs(a_ ) )
_UpperCAmelCase : List[Any] = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def _snake_case ( self ) -> str:
_UpperCAmelCase : Tuple = self.scheduler_classes[0]
_UpperCAmelCase : int = self.get_scheduler_config(prediction_type="""v_prediction""" )
_UpperCAmelCase : Tuple = scheduler_class(**a_ )
_UpperCAmelCase : List[str] = len(a_ )
_UpperCAmelCase : Tuple = self.dummy_model()
_UpperCAmelCase : Optional[int] = self.dummy_sample_deter
_UpperCAmelCase : List[Any] = torch.manual_seed(0 )
for t in reversed(range(a_ ) ):
# 1. predict noise residual
_UpperCAmelCase : List[Any] = model(a_ ,a_ )
# 2. predict previous mean of sample x_t-1
_UpperCAmelCase : List[Any] = scheduler.step(a_ ,a_ ,a_ ,generator=a_ ).prev_sample
_UpperCAmelCase : Any = pred_prev_sample
_UpperCAmelCase : Union[str, Any] = torch.sum(torch.abs(a_ ) )
_UpperCAmelCase : Optional[int] = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_UpperCAmelCase : Optional[Any] = self.get_scheduler_config()
_UpperCAmelCase : List[Any] = scheduler_class(**a_ )
_UpperCAmelCase : str = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=a_ )
_UpperCAmelCase : Dict = scheduler.timesteps
for i, timestep in enumerate(a_ ):
if i == len(a_ ) - 1:
_UpperCAmelCase : List[Any] = -1
else:
_UpperCAmelCase : int = timesteps[i + 1]
_UpperCAmelCase : Dict = scheduler.previous_timestep(a_ )
_UpperCAmelCase : str = prev_t.item()
self.assertEqual(a_ ,a_ )
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : int = self.scheduler_classes[0]
_UpperCAmelCase : List[Any] = self.get_scheduler_config()
_UpperCAmelCase : Tuple = scheduler_class(**a_ )
_UpperCAmelCase : Optional[Any] = [100, 87, 50, 51, 0]
with self.assertRaises(a_ ,msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=a_ )
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : Optional[int] = self.scheduler_classes[0]
_UpperCAmelCase : Optional[int] = self.get_scheduler_config()
_UpperCAmelCase : Dict = scheduler_class(**a_ )
_UpperCAmelCase : List[str] = [100, 87, 50, 1, 0]
_UpperCAmelCase : Tuple = len(a_ )
with self.assertRaises(a_ ,msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=a_ ,timesteps=a_ )
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : str = self.scheduler_classes[0]
_UpperCAmelCase : List[str] = self.get_scheduler_config()
_UpperCAmelCase : Optional[int] = scheduler_class(**a_ )
_UpperCAmelCase : List[str] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
a_ ,msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" ,):
scheduler.set_timesteps(timesteps=a_ )
| 368 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : Union[str, Any] = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = """roformer"""
def __init__( self ,a_=50_000 ,a_=None ,a_=768 ,a_=12 ,a_=12 ,a_=3_072 ,a_="gelu" ,a_=0.1 ,a_=0.1 ,a_=1_536 ,a_=2 ,a_=0.02 ,a_=1E-1_2 ,a_=0 ,a_=False ,a_=True ,**a_ ,) -> Tuple:
super().__init__(pad_token_id=a_ ,**a_ )
_UpperCAmelCase : List[Any] = vocab_size
_UpperCAmelCase : str = hidden_size if embedding_size is None else embedding_size
_UpperCAmelCase : List[Any] = hidden_size
_UpperCAmelCase : str = num_hidden_layers
_UpperCAmelCase : Optional[Any] = num_attention_heads
_UpperCAmelCase : Optional[Any] = hidden_act
_UpperCAmelCase : str = intermediate_size
_UpperCAmelCase : Optional[Any] = hidden_dropout_prob
_UpperCAmelCase : Any = attention_probs_dropout_prob
_UpperCAmelCase : Optional[int] = max_position_embeddings
_UpperCAmelCase : Any = type_vocab_size
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : Dict = layer_norm_eps
_UpperCAmelCase : Optional[int] = rotary_value
_UpperCAmelCase : Any = use_cache
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCAmelCase : Optional[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_UpperCAmelCase : List[Any] = {0: """batch""", 1: """sequence"""}
_UpperCAmelCase : Tuple = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
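# Illustrative note: the dynamic axes above tell the ONNX exporter which input
# dimensions may vary at runtime -- batch and sequence length for standard tasks,
# plus a choice dimension when the model is exported for multiple choice.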
| 349 | 0 |