| language (string, 1 class) | repo (string, 346 values) | path (string, 6–201 chars) | class_span (dict) | source (string, 21–2.38M chars) | target (string, 1–96 chars) |
|---|---|---|---|---|---|
| python | getsentry__sentry | src/sentry/workflow_engine/endpoints/organization_test_fire_action.py | {"start": 1868, "end": 1962} |
class ____(TypedDict):
actions: list[str]
@region_silo_endpoint
| TestFireActionErrorsResponse |
| python | wandb__wandb | wandb/vendor/pygments/lexers/dsls.py | {"start": 7519, "end": 10542} |
class ____(RegexLexer):
"""
For `Bro <http://bro-ids.org/>`_ scripts.
.. versionadded:: 1.5
"""
name = 'Bro'
aliases = ['bro']
filenames = ['*.bro']
_hex = r'[0-9a-fA-F_]'
_float = r'((\d*\.?\d+)|(\d+\.?\d*))([eE][-+]?\d+)?'
_h = r'[A-Za-z0-9][-A-Za-z0-9]*'
tokens = {
'root': [
# Whitespace
(r'^@.*?\n', Comment.Preproc),
(r'#.*?\n', Comment.Single),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text),
# Keywords
(r'(add|alarm|break|case|const|continue|delete|do|else|enum|event'
r'|export|for|function|if|global|hook|local|module|next'
r'|of|print|redef|return|schedule|switch|type|when|while)\b', Keyword),
(r'(addr|any|bool|count|counter|double|file|int|interval|net'
r'|pattern|port|record|set|string|subnet|table|time|timer'
r'|vector)\b', Keyword.Type),
(r'(T|F)\b', Keyword.Constant),
(r'(&)((?:add|delete|expire)_func|attr|(?:create|read|write)_expire'
r'|default|disable_print_hook|raw_output|encrypt|group|log'
r'|mergeable|optional|persistent|priority|redef'
r'|rotate_(?:interval|size)|synchronized)\b',
bygroups(Punctuation, Keyword)),
(r'\s+module\b', Keyword.Namespace),
# Addresses, ports and networks
(r'\d+/(tcp|udp|icmp|unknown)\b', Number),
(r'(\d+\.){3}\d+', Number),
(r'(' + _hex + r'){7}' + _hex, Number),
(r'0x' + _hex + r'(' + _hex + r'|:)*::(' + _hex + r'|:)*', Number),
(r'((\d+|:)(' + _hex + r'|:)*)?::(' + _hex + r'|:)*', Number),
(r'(\d+\.\d+\.|(\d+\.){2}\d+)', Number),
# Hostnames
(_h + r'(\.' + _h + r')+', String),
# Numeric
(_float + r'\s+(day|hr|min|sec|msec|usec)s?\b', Literal.Date),
(r'0[xX]' + _hex, Number.Hex),
(_float, Number.Float),
(r'\d+', Number.Integer),
(r'/', String.Regex, 'regex'),
(r'"', String, 'string'),
# Operators
(r'[!%*/+:<=>?~|-]', Operator),
(r'([-+=&|]{2}|[+=!><-]=)', Operator),
(r'(in|match)\b', Operator.Word),
(r'[{}()\[\]$.,;]', Punctuation),
# Identifier
(r'([_a-zA-Z]\w*)(::)', bygroups(Name, Name.Namespace)),
(r'[a-zA-Z_]\w*', Name)
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String),
(r'\\\n', String),
(r'\\', String)
],
'regex': [
(r'/', String.Regex, '#pop'),
(r'\\[\\nt/]', String.Regex), # String.Escape is too intense here.
(r'[^\\/\n]+', String.Regex),
(r'\\\n', String.Regex),
(r'\\', String.Regex)
]
}
| BroLexer |
| python | nedbat__coveragepy | tests/test_plugins.py | {"start": 844, "end": 1056} |
class ____(TPluginConfig):
"""A plugin configure thing when we don't really need one."""
def get_plugin_options(self, plugin: str) -> TConfigSectionOut:
return {} # pragma: never called
| NullConfig |
| python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataplex.py | {"start": 110183, "end": 114896} |
class ____(DataplexCatalogBaseOperator):
"""
Update an EntryGroup resource.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataplexCatalogUpdateEntryGroupOperator`
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param location: Required. The ID of the Google Cloud region that the task belongs to.
:param update_mask: Optional. Names of fields whose values to overwrite on an entry group.
If this parameter is absent or empty, all modifiable fields are overwritten. If such
fields are non-required and omitted in the request body, their values are emptied.
:param entry_group_id: Required. ID of the EntryGroup to update.
:param entry_group_configuration: Required. The updated configuration body of the EntryGroup.
For more details please see API documentation:
https://cloud.google.com/dataplex/docs/reference/rest/v1/projects.locations.entryGroups#EntryGroup
:param validate_only: Optional. The service validates the request without performing any mutations.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
:param gcp_conn_id: Optional. The connection ID to use when fetching connection info.
:param impersonation_chain: Optional. Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = tuple(
{"entry_group_id", "entry_group_configuration", "update_mask"}
| set(DataplexCatalogBaseOperator.template_fields)
)
operator_extra_links = (DataplexCatalogEntryGroupLink(),)
def __init__(
self,
entry_group_id: str,
entry_group_configuration: dict | EntryGroup,
update_mask: list[str] | FieldMask | None = None,
validate_request: bool | None = False,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.entry_group_id = entry_group_id
self.entry_group_configuration = entry_group_configuration
self.update_mask = update_mask
self.validate_request = validate_request
@property
def extra_links_params(self) -> dict[str, Any]:
return {
**super().extra_links_params,
"entry_group_id": self.entry_group_id,
}
def execute(self, context: Context):
DataplexCatalogEntryGroupLink.persist(context=context)
if self.validate_request:
self.log.info("Validating an Update Dataplex Catalog EntryGroup request.")
else:
self.log.info(
"Updating Dataplex Catalog EntryGroup %s.",
self.entry_group_id,
)
try:
operation = self.hook.update_entry_group(
location=self.location,
project_id=self.project_id,
entry_group_id=self.entry_group_id,
entry_group_configuration=self.entry_group_configuration,
update_mask=self.update_mask,
validate_only=self.validate_request,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
entry_group = self.hook.wait_for_operation(timeout=self.timeout, operation=operation)
except NotFound as ex:
self.log.info("Specified EntryGroup was not found.")
raise AirflowException(ex)
except Exception as exc:
raise AirflowException(exc)
else:
result = EntryGroup.to_dict(entry_group) if not self.validate_request else None
if not self.validate_request:
self.log.info("EntryGroup %s was successfully updated.", self.entry_group_id)
return result
| DataplexCatalogUpdateEntryGroupOperator |
| python | Netflix__metaflow | metaflow/parameters.py | {"start": 3214, "end": 8843} |
class ____(object):
"""
This is a wrapper object for a user-defined function that is called
at deploy time to populate fields in a Parameter. The wrapper
is needed to make Click show the actual value returned by the
function instead of a function pointer in its help text. Also, this
object curries the context argument for the function, and pretty
prints any exceptions that occur during evaluation.
"""
def __init__(
self,
parameter_name,
parameter_type,
field,
fun,
return_str=True,
print_representation=None,
):
self.fun = fun
self.field = field
self.parameter_name = parameter_name
self.parameter_type = parameter_type
self.return_str = return_str
self.print_representation = self.user_print_representation = (
print_representation
)
if self.print_representation is None:
self.print_representation = str(self.fun)
def __call__(self, deploy_time=False):
# This is called in two ways:
# - through the normal Click default parameter evaluation: if a default
# value is a callable, Click will call it without any argument. In other
# words, deploy_time=False. This happens for a normal "run" or the "trigger"
# functions for step-functions for example. Anything that has the
# @add_custom_parameters decorator will trigger this. Once click calls this,
# it will then pass the resulting value to the convert() functions for the
# type for that Parameter.
# - by deploy_time_eval which is invoked to process the parameters at
# deploy_time and outside of click processing (ie: at that point, Click
# is not involved since anytime deploy_time_eval is called, no custom parameters
# have been added). In that situation, deploy_time will be True. Note that in
# this scenario, the value should be something that can be converted to JSON.
# The deploy_time value can therefore be used to determine which type of
# processing is requested.
ctx = context_proto._replace(parameter_name=self.parameter_name)
try:
try:
# Most user-level functions may not care about the deploy_time parameter
# but IncludeFile does.
val = self.fun(ctx, deploy_time)
except TypeError:
val = self.fun(ctx)
except:
raise ParameterFieldFailed(self.parameter_name, self.field)
else:
return self._check_type(val, deploy_time)
def _check_type(self, val, deploy_time):
# it is easy to introduce a deploy-time function that accidentally
# returns a value whose type is not compatible with what is defined
# in Parameter. Let's catch those mistakes early here, instead of
# showing a cryptic stack trace later.
# note: this doesn't work with long in Python2 or types defined as
# click types, e.g. click.INT
TYPES = {bool: "bool", int: "int", float: "float", list: "list", dict: "dict"}
msg = (
"The value returned by the deploy-time function for "
"the parameter *%s* field *%s* has a wrong type. "
% (self.parameter_name, self.field)
)
if isinstance(self.parameter_type, list):
if not any(isinstance(val, x) for x in self.parameter_type):
msg += "Expected one of the following %s." % TYPES[self.parameter_type]
raise ParameterFieldTypeMismatch(msg)
return str(val) if self.return_str else val
elif self.parameter_type in TYPES:
if type(val) != self.parameter_type:
msg += "Expected a %s." % TYPES[self.parameter_type]
raise ParameterFieldTypeMismatch(msg)
return str(val) if self.return_str else val
else:
if deploy_time:
try:
if not is_stringish(val):
val = json.dumps(val)
except TypeError:
msg += "Expected a JSON-encodable object or a string."
raise ParameterFieldTypeMismatch(msg)
return val
# If not deploy_time, we expect a string
if not is_stringish(val):
msg += "Expected a string."
raise ParameterFieldTypeMismatch(msg)
return val
@property
def description(self):
return self.print_representation
def __str__(self):
if self.user_print_representation:
return self.user_print_representation
return self()
def __repr__(self):
if self.user_print_representation:
return self.user_print_representation
return self()
def deploy_time_eval(value):
if isinstance(value, DeployTimeField):
return value(deploy_time=True)
elif isinstance(value, DelayedEvaluationParameter):
return value(return_str=True)
else:
return value
# this is called by cli.main
def set_parameter_context(flow_name, echo, datastore, configs):
from .user_configs.config_parameters import (
ConfigValue,
) # Prevent circular dependency
global context_proto
context_proto = ParameterContext(
flow_name=flow_name,
user_name=get_username(),
parameter_name=None,
logger=echo,
ds_type=datastore.TYPE,
configs=ConfigValue(dict(configs)),
)
| DeployTimeField |
| python | Lightning-AI__lightning | tests/tests_pytorch/accelerators/test_xla.py | {"start": 1502, "end": 4046} |
class ____(BoringModel):
def __init__(self):
super(BoringModel, self).__init__()
self.layer_1 = nn.Linear(32, 10, bias=False)
self.layer_2 = nn.Linear(10, 32, bias=False)
self.layer_3 = nn.Linear(32, 10, bias=False)
self.layer_3.weight = self.layer_1.weight
def forward(self, x):
x = self.layer_1(x)
x = self.layer_2(x)
return self.layer_3(x)
@RunIf(tpu=True, standalone=True)
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
def test_resume_training_on_cpu(tmp_path):
"""Checks if training can be resumed from a saved checkpoint on CPU."""
# Train a model on TPU
model = BoringModel()
trainer = Trainer(max_epochs=1, accelerator="tpu", devices="auto", default_root_dir=tmp_path)
trainer.fit(model)
if trainer.world_size != trainer.num_devices:
# we're in multinode. unless the filesystem is shared, only the main node will have access to the checkpoint
# since we cannot know this, the code below needs to be skipped
return
model_path = trainer.checkpoint_callback.best_model_path
# Verify saved Tensors are on CPU
ckpt = torch.load(model_path, weights_only=True)
weight_tensor = list(ckpt["state_dict"].values())[0]
assert weight_tensor.device == torch.device("cpu")
# Verify that training is resumed on CPU
trainer = Trainer(max_epochs=1, default_root_dir=tmp_path)
trainer.fit(model, ckpt_path=model_path)
@RunIf(tpu=True)
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
def test_if_test_works_after_train(tmp_path):
"""Ensure that .test() works after .fit()"""
model = BoringModel()
trainer = Trainer(max_epochs=1, accelerator="tpu", devices="auto", default_root_dir=tmp_path, fast_dev_run=True)
trainer.fit(model)
out = trainer.test(model)
assert len(out) == 1
@RunIf(skip_windows=True)
def test_accelerator_cpu_when_tpu_available(tpu_available):
assert XLAAccelerator.is_available()
trainer = Trainer(accelerator="cpu", devices=8)
assert isinstance(trainer.accelerator, CPUAccelerator)
@RunIf(skip_windows=True)
@pytest.mark.parametrize(("accelerator", "devices"), [("auto", 8), ("auto", "auto"), ("tpu", "auto")])
def test_accelerator_tpu(accelerator, devices, tpu_available):
assert XLAAccelerator.is_available()
trainer = Trainer(accelerator=accelerator, devices=devices)
assert isinstance(trainer.accelerator, XLAAccelerator)
assert isinstance(trainer.strategy, XLAStrategy)
| WeightSharingModule |
| python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/uninitialized_attributes.py | {"start": 58, "end": 123} |
class ____(Base):
attr3: int #: docstring
attr4: str
| Derived |
| python | tensorflow__tensorflow | tensorflow/python/keras/layers/legacy_rnn/rnn_cell_wrapper_impl.py | {"start": 13668, "end": 16940} |
class ____(object):
"""RNNCell wrapper that ensures cell inputs are added to the outputs."""
def __init__(self, cell, residual_fn=None, **kwargs):
"""Constructs a `ResidualWrapper` for `cell`.
Args:
cell: An instance of `RNNCell`.
residual_fn: (Optional) The function to map raw cell inputs and raw cell
outputs to the actual cell outputs of the residual network.
Defaults to calling nest.map_structure on (lambda i, o: i + o), inputs
and outputs.
**kwargs: dict of keyword arguments for base layer.
"""
super(ResidualWrapperBase, self).__init__(cell, **kwargs)
self._residual_fn = residual_fn
@property
def state_size(self):
return self.cell.state_size
@property
def output_size(self):
return self.cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope_v2(type(self).__name__ + "ZeroState"):
return self.cell.zero_state(batch_size, dtype)
def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
"""Run the cell and then apply the residual_fn on its inputs to its outputs.
Args:
inputs: cell inputs.
state: cell state.
cell_call_fn: Wrapped cell's method to use for step computation (cell's
`__call__` or 'call' method).
**kwargs: Additional arguments passed to the wrapped cell's `call`.
Returns:
Tuple of cell outputs and new state.
Raises:
TypeError: If cell inputs and outputs have different structure (type).
ValueError: If cell inputs and outputs have different structure (value).
"""
outputs, new_state = cell_call_fn(inputs, state, **kwargs)
# Ensure shapes match
def assert_shape_match(inp, out):
inp.get_shape().assert_is_compatible_with(out.get_shape())
def default_residual_fn(inputs, outputs):
nest.assert_same_structure(inputs, outputs)
nest.map_structure(assert_shape_match, inputs, outputs)
return nest.map_structure(lambda inp, out: inp + out, inputs, outputs)
res_outputs = (self._residual_fn or default_residual_fn)(inputs, outputs)
return (res_outputs, new_state)
def get_config(self):
"""Returns the config of the residual wrapper."""
if self._residual_fn is not None:
function, function_type, function_module = _serialize_function_to_config(
self._residual_fn)
config = {
"residual_fn": function,
"residual_fn_type": function_type,
"residual_fn_module": function_module
}
else:
config = {}
base_config = super(ResidualWrapperBase, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
if "residual_fn" in config:
config = config.copy()
residual_function = _parse_config_to_function(config, custom_objects,
"residual_fn",
"residual_fn_type",
"residual_fn_module")
config["residual_fn"] = residual_function
return super(ResidualWrapperBase, cls).from_config(
config, custom_objects=custom_objects)
| ResidualWrapperBase |
| python | django__django | tests/aggregation_regress/models.py | {"start": 3288, "end": 3603} |
class ____(models.Model):
recipe = models.ForeignKey("RecipeUnmanaged", models.CASCADE)
author = models.ForeignKey(
AuthorUnmanaged, models.CASCADE, db_column="authorproxy_id"
)
class Meta:
managed = False
db_table = Recipe.tasters.through._meta.db_table
| RecipeTasterUnmanaged |
| python | anthropics__anthropic-sdk-python | src/anthropic/resources/beta/beta.py | {"start": 4685, "end": 5342} |
class ____:
def __init__(self, beta: Beta) -> None:
self._beta = beta
@cached_property
def models(self) -> ModelsWithStreamingResponse:
return ModelsWithStreamingResponse(self._beta.models)
@cached_property
def messages(self) -> MessagesWithStreamingResponse:
return MessagesWithStreamingResponse(self._beta.messages)
@cached_property
def files(self) -> FilesWithStreamingResponse:
return FilesWithStreamingResponse(self._beta.files)
@cached_property
def skills(self) -> SkillsWithStreamingResponse:
return SkillsWithStreamingResponse(self._beta.skills)
| BetaWithStreamingResponse |
| python | scipy__scipy | scipy/_lib/tests/test_bunch.py | {"start": 279, "end": 6389} |
class ____:
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Tests with Result
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def setup_method(self):
# Set up an instance of Result.
self.result = Result(x=1, y=2, z=3, w=99, beta=0.5)
def test_attribute_access(self):
assert_equal(self.result.x, 1)
assert_equal(self.result.y, 2)
assert_equal(self.result.z, 3)
assert_equal(self.result.w, 99)
assert_equal(self.result.beta, 0.5)
def test_indexing(self):
assert_equal(self.result[0], 1)
assert_equal(self.result[1], 2)
assert_equal(self.result[2], 3)
assert_equal(self.result[-1], 3)
with pytest.raises(IndexError, match='index out of range'):
self.result[3]
def test_unpacking(self):
x0, y0, z0 = self.result
assert_equal((x0, y0, z0), (1, 2, 3))
assert_equal(self.result, (1, 2, 3))
def test_slice(self):
assert_equal(self.result[1:], (2, 3))
assert_equal(self.result[::2], (1, 3))
assert_equal(self.result[::-1], (3, 2, 1))
def test_len(self):
assert_equal(len(self.result), 3)
def test_repr(self):
s = repr(self.result)
assert_equal(s, 'Result(x=1, y=2, z=3, w=99, beta=0.5)')
def test_hash(self):
assert_equal(hash(self.result), hash((1, 2, 3)))
def test_pickle(self):
s = pickle.dumps(self.result)
obj = pickle.loads(s)
assert isinstance(obj, Result)
assert_equal(obj.x, self.result.x)
assert_equal(obj.y, self.result.y)
assert_equal(obj.z, self.result.z)
assert_equal(obj.w, self.result.w)
assert_equal(obj.beta, self.result.beta)
def test_read_only_existing(self):
with pytest.raises(AttributeError, match="can't set attribute"):
self.result.x = -1
def test_read_only_new(self):
self.result.plate_of_shrimp = "lattice of coincidence"
assert self.result.plate_of_shrimp == "lattice of coincidence"
def test_constructor_missing_parameter(self):
with pytest.raises(TypeError, match='missing'):
# `w` is missing.
Result(x=1, y=2, z=3, beta=0.75)
def test_constructor_incorrect_parameter(self):
with pytest.raises(TypeError, match='unexpected'):
# `foo` is not an existing field.
Result(x=1, y=2, z=3, w=123, beta=0.75, foo=999)
def test_module(self):
m = 'scipy._lib.tests.test_bunch'
assert_equal(Result.__module__, m)
assert_equal(self.result.__module__, m)
def test_extra_fields_per_instance(self):
# This test exists to ensure that instances of the same class
# store their own values for the extra fields. That is, the values
# are stored per instance and not in the class.
result1 = Result(x=1, y=2, z=3, w=-1, beta=0.0)
result2 = Result(x=4, y=5, z=6, w=99, beta=1.0)
assert_equal(result1.w, -1)
assert_equal(result1.beta, 0.0)
# The rest of these checks aren't essential, but let's check
# them anyway.
assert_equal(result1[:], (1, 2, 3))
assert_equal(result2.w, 99)
assert_equal(result2.beta, 1.0)
assert_equal(result2[:], (4, 5, 6))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Other tests
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def test_extra_field_names_is_optional(self):
Square = _make_tuple_bunch('Square', ['width', 'height'])
sq = Square(width=1, height=2)
assert_equal(sq.width, 1)
assert_equal(sq.height, 2)
s = repr(sq)
assert_equal(s, 'Square(width=1, height=2)')
def test_tuple_like(self):
Tup = _make_tuple_bunch('Tup', ['a', 'b'])
tu = Tup(a=1, b=2)
assert isinstance(tu, tuple)
assert isinstance(tu + (1,), tuple)
def test_explicit_module(self):
m = 'some.module.name'
Foo = _make_tuple_bunch('Foo', ['x'], ['a', 'b'], module=m)
foo = Foo(x=1, a=355, b=113)
assert_equal(Foo.__module__, m)
assert_equal(foo.__module__, m)
def test_passes_polars_checks(self):
# gh-22450
Square = _make_tuple_bunch('Square', ['width', 'height'])
assert hasattr(Square, '_replace')
assert hasattr(Square, '_field_defaults')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Argument validation
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@pytest.mark.parametrize('args', [('123', ['a'], ['b']),
('Foo', ['-3'], ['x']),
('Foo', ['a'], ['+-*/'])])
def test_identifiers_not_allowed(self, args):
with pytest.raises(ValueError, match='identifiers'):
_make_tuple_bunch(*args)
@pytest.mark.parametrize('args', [('Foo', ['a', 'b', 'a'], ['x']),
('Foo', ['a', 'b'], ['b', 'x'])])
def test_repeated_field_names(self, args):
with pytest.raises(ValueError, match='Duplicate'):
_make_tuple_bunch(*args)
@pytest.mark.parametrize('args', [('Foo', ['_a'], ['x']),
('Foo', ['a'], ['_x'])])
def test_leading_underscore_not_allowed(self, args):
with pytest.raises(ValueError, match='underscore'):
_make_tuple_bunch(*args)
@pytest.mark.parametrize('args', [('Foo', ['def'], ['x']),
('Foo', ['a'], ['or']),
('and', ['a'], ['x'])])
def test_keyword_not_allowed_in_fields(self, args):
with pytest.raises(ValueError, match='keyword'):
_make_tuple_bunch(*args)
def test_at_least_one_field_name_required(self):
with pytest.raises(ValueError, match='at least one name'):
_make_tuple_bunch('Qwerty', [], ['a', 'b'])
| TestMakeTupleBunch |
| python | coleifer__peewee | playhouse/pool.py | {"start": 9700, "end": 10017} |
class ____(PooledDatabase, MySQLDatabase):
def _is_closed(self, conn):
if self.server_version[0] == 8:
args = ()
else:
args = (False,)
try:
conn.ping(*args)
except:
return True
else:
return False
| PooledMySQLDatabase |
| python | RaRe-Technologies__gensim | gensim/test/test_poincare.py | {"start": 1515, "end": 10813} |
class ____(unittest.TestCase):
def setUp(self):
self.data = PoincareRelations(datapath('poincare_hypernyms.tsv'))
self.data_large = PoincareRelations(datapath('poincare_hypernyms_large.tsv'))
def models_equal(self, model_1, model_2):
self.assertEqual(len(model_1.kv), len(model_2.kv))
self.assertEqual(set(model_1.kv.index_to_key), set(model_2.kv.index_to_key))
self.assertTrue(np.allclose(model_1.kv.vectors, model_2.kv.vectors))
def test_data_counts(self):
"""Tests whether data has been loaded correctly and completely."""
model = PoincareModel(self.data)
self.assertEqual(len(model.all_relations), 5)
self.assertEqual(len(model.node_relations[model.kv.get_index('kangaroo.n.01')]), 3)
self.assertEqual(len(model.kv), 7)
self.assertTrue('mammal.n.01' not in model.node_relations)
def test_data_counts_with_bytes(self):
"""Tests whether input bytes data is loaded correctly and completely."""
model = PoincareModel([(b'\x80\x01c', b'\x50\x71a'), (b'node.1', b'node.2')])
self.assertEqual(len(model.all_relations), 2)
self.assertEqual(len(model.node_relations[model.kv.get_index(b'\x80\x01c')]), 1)
self.assertEqual(len(model.kv), 4)
self.assertTrue(b'\x50\x71a' not in model.node_relations)
def test_persistence(self):
"""Tests whether the model is saved and loaded correctly."""
model = PoincareModel(self.data, burn_in=0, negative=3)
model.train(epochs=1)
model.save(get_testfile())
loaded = PoincareModel.load(get_testfile())
self.models_equal(model, loaded)
def test_persistence_separate_file(self):
"""Tests whether the model is saved and loaded correctly when the arrays are stored separately."""
model = PoincareModel(self.data, burn_in=0, negative=3)
model.train(epochs=1)
model.save(get_testfile(), sep_limit=1)
loaded = PoincareModel.load(get_testfile())
self.models_equal(model, loaded)
def test_online_learning(self):
"""Tests whether additional input data is loaded correctly and completely."""
model = PoincareModel(self.data, burn_in=0, negative=3)
self.assertEqual(len(model.kv), 7)
self.assertEqual(model.kv.get_vecattr('kangaroo.n.01', 'count'), 3)
self.assertEqual(model.kv.get_vecattr('cat.n.01', 'count'), 1)
model.build_vocab([('kangaroo.n.01', 'cat.n.01')], update=True) # update vocab
self.assertEqual(model.kv.get_vecattr('kangaroo.n.01', 'count'), 4)
self.assertEqual(model.kv.get_vecattr('cat.n.01', 'count'), 2)
def test_train_after_load(self):
"""Tests whether the model can be trained correctly after loading from disk."""
model = PoincareModel(self.data, burn_in=0, negative=3)
model.train(epochs=1)
model.save(get_testfile())
loaded = PoincareModel.load(get_testfile())
model.train(epochs=1)
loaded.train(epochs=1)
self.models_equal(model, loaded)
def test_persistence_old_model(self):
"""Tests whether model from older gensim version is loaded correctly."""
loaded = PoincareModel.load(datapath('poincare_test_3.4.0'))
self.assertEqual(loaded.kv.vectors.shape, (239, 2))
self.assertEqual(len(loaded.kv), 239)
self.assertEqual(loaded.size, 2)
self.assertEqual(len(loaded.all_relations), 200)
def test_train_old_model_after_load(self):
"""Tests whether loaded model from older gensim version can be trained correctly."""
loaded = PoincareModel.load(datapath('poincare_test_3.4.0'))
old_vectors = np.copy(loaded.kv.vectors)
loaded.train(epochs=2)
self.assertFalse(np.allclose(old_vectors, loaded.kv.vectors))
def test_invalid_data_raises_error(self):
"""Tests that error is raised on invalid input data."""
with self.assertRaises(ValueError):
PoincareModel([("a", "b", "c")])
with self.assertRaises(ValueError):
PoincareModel(["a", "b", "c"])
with self.assertRaises(ValueError):
PoincareModel("ab")
def test_vector_shape(self):
"""Tests whether vectors are initialized with the correct size."""
model = PoincareModel(self.data, size=20)
self.assertEqual(model.kv.vectors.shape, (7, 20))
def test_vector_dtype(self):
"""Tests whether vectors have the correct dtype before and after training."""
model = PoincareModel(self.data_large, dtype=np.float32, burn_in=0, negative=3)
self.assertEqual(model.kv.vectors.dtype, np.float32)
model.train(epochs=1)
self.assertEqual(model.kv.vectors.dtype, np.float32)
def test_training(self):
"""Tests that vectors are different before and after training."""
model = PoincareModel(self.data_large, burn_in=0, negative=3)
old_vectors = np.copy(model.kv.vectors)
model.train(epochs=2)
self.assertFalse(np.allclose(old_vectors, model.kv.vectors))
def test_training_multiple(self):
"""Tests that calling train multiple times results in different vectors."""
model = PoincareModel(self.data_large, burn_in=0, negative=3)
model.train(epochs=2)
old_vectors = np.copy(model.kv.vectors)
model.train(epochs=1)
self.assertFalse(np.allclose(old_vectors, model.kv.vectors))
old_vectors = np.copy(model.kv.vectors)
model.train(epochs=0)
self.assertTrue(np.allclose(old_vectors, model.kv.vectors))
def test_gradients_check(self):
"""Tests that the model is trained successfully with gradients check enabled."""
model = PoincareModel(self.data, negative=3)
try:
model.train(epochs=1, batch_size=1, check_gradients_every=1)
except Exception as e:
self.fail('Exception %s raised unexpectedly while training with gradient checking' % repr(e))
@unittest.skipIf(not autograd_installed, 'autograd needs to be installed for this test')
def test_wrong_gradients_raises_assertion(self):
"""Tests that discrepancy in gradients raises an error."""
model = PoincareModel(self.data, negative=3)
model._loss_grad = Mock(return_value=np.zeros((2 + model.negative, model.size)))
with self.assertRaises(AssertionError):
model.train(epochs=1, batch_size=1, check_gradients_every=1)
def test_reproducible(self):
"""Tests that vectors are same for two independent models trained with the same seed."""
model_1 = PoincareModel(self.data_large, seed=1, negative=3, burn_in=1)
model_1.train(epochs=2)
model_2 = PoincareModel(self.data_large, seed=1, negative=3, burn_in=1)
model_2.train(epochs=2)
self.assertTrue(np.allclose(model_1.kv.vectors, model_2.kv.vectors))
def test_burn_in(self):
"""Tests that vectors are different after burn-in."""
model = PoincareModel(self.data, burn_in=1, negative=3)
original_vectors = np.copy(model.kv.vectors)
model.train(epochs=0)
self.assertFalse(np.allclose(model.kv.vectors, original_vectors))
def test_burn_in_only_done_once(self):
"""Tests that burn-in does not happen when train is called a second time."""
model = PoincareModel(self.data, negative=3, burn_in=1)
model.train(epochs=0)
original_vectors = np.copy(model.kv.vectors)
model.train(epochs=0)
self.assertTrue(np.allclose(model.kv.vectors, original_vectors))
def test_negatives(self):
"""Tests that correct number of negatives are sampled."""
model = PoincareModel(self.data, negative=5)
self.assertEqual(len(model._get_candidate_negatives()), 5)
def test_error_if_negative_more_than_population(self):
"""Tests error is rased if number of negatives to sample is more than remaining nodes."""
model = PoincareModel(self.data, negative=5)
with self.assertRaises(ValueError):
model.train(epochs=1)
def test_no_duplicates_and_positives_in_negative_sample(self):
"""Tests that no duplicates or positively related nodes are present in negative samples."""
model = PoincareModel(self.data_large, negative=3)
positive_nodes = model.node_relations[0] # Positive nodes for node 0
num_samples = 100 # Repeat experiment multiple times
for i in range(num_samples):
negatives = model._sample_negatives(0)
self.assertFalse(positive_nodes & set(negatives))
self.assertEqual(len(negatives), len(set(negatives)))
def test_handle_duplicates(self):
"""Tests that correct number of negatives are used."""
vector_updates = np.array([[0.5, 0.5], [0.1, 0.2], [0.3, -0.2]])
node_indices = [0, 1, 0]
PoincareModel._handle_duplicates(vector_updates, node_indices)
vector_updates_expected = np.array([[0.0, 0.0], [0.1, 0.2], [0.8, 0.3]])
self.assertTrue((vector_updates == vector_updates_expected).all())
@classmethod
def tearDownClass(cls):
try:
os.unlink(get_testfile())
except OSError:
pass
| TestPoincareModel |
| python | spack__spack | lib/spack/spack/util/elf.py | {"start": 564, "end": 780} |
class ____(NamedTuple):
sh_name: int
sh_type: int
sh_flags: int
sh_addr: int
sh_offset: int
sh_size: int
sh_link: int
sh_info: int
sh_addralign: int
sh_entsize: int
| SectionHeader |
| python | docker__docker-py | tests/unit/utils_test.py | {"start": 9235, "end": 12364} |
class ____(unittest.TestCase):
def test_parse_host(self):
invalid_hosts = [
'0.0.0.0',
'tcp://',
'udp://127.0.0.1',
'udp://127.0.0.1:2375',
'ssh://:22/path',
'tcp://netloc:3333/path?q=1',
'unix:///sock/path#fragment',
'https://netloc:3333/path;params',
'ssh://:clearpassword@host:22',
]
valid_hosts = {
'0.0.0.1:5555': 'http://0.0.0.1:5555',
':6666': 'http://127.0.0.1:6666',
'tcp://:7777': 'http://127.0.0.1:7777',
'http://:7777': 'http://127.0.0.1:7777',
'https://kokia.jp:2375': 'https://kokia.jp:2375',
'unix:///var/run/docker.sock': 'http+unix:///var/run/docker.sock',
'unix://': 'http+unix:///var/run/docker.sock',
'12.234.45.127:2375/docker/engine': (
'http://12.234.45.127:2375/docker/engine'
),
'somehost.net:80/service/swarm': (
'http://somehost.net:80/service/swarm'
),
'npipe:////./pipe/docker_engine': 'npipe:////./pipe/docker_engine',
'[fd12::82d1]:2375': 'http://[fd12::82d1]:2375',
'https://[fd12:5672::12aa]:1090': 'https://[fd12:5672::12aa]:1090',
'[fd12::82d1]:2375/docker/engine': (
'http://[fd12::82d1]:2375/docker/engine'
),
'ssh://[fd12::82d1]': 'ssh://[fd12::82d1]:22',
'ssh://user@[fd12::82d1]:8765': 'ssh://user@[fd12::82d1]:8765',
'ssh://': 'ssh://127.0.0.1:22',
'ssh://user@localhost:22': 'ssh://user@localhost:22',
'ssh://user@remote': 'ssh://user@remote:22',
}
for host in invalid_hosts:
msg = f'Should have failed to parse invalid host: {host}'
with self.assertRaises(DockerException, msg=msg):
parse_host(host, None)
for host, expected in valid_hosts.items():
self.assertEqual(
parse_host(host, None),
expected,
msg=f'Failed to parse valid host: {host}',
)
def test_parse_host_empty_value(self):
unix_socket = 'http+unix:///var/run/docker.sock'
npipe = 'npipe:////./pipe/docker_engine'
for val in [None, '']:
assert parse_host(val, is_win32=False) == unix_socket
assert parse_host(val, is_win32=True) == npipe
def test_parse_host_tls(self):
host_value = 'myhost.docker.net:3348'
expected_result = 'https://myhost.docker.net:3348'
assert parse_host(host_value, tls=True) == expected_result
def test_parse_host_tls_tcp_proto(self):
host_value = 'tcp://myhost.docker.net:3348'
expected_result = 'https://myhost.docker.net:3348'
assert parse_host(host_value, tls=True) == expected_result
def test_parse_host_trailing_slash(self):
host_value = 'tcp://myhost.docker.net:2376/'
expected_result = 'http://myhost.docker.net:2376'
assert parse_host(host_value) == expected_result
| ParseHostTest |
| python | tensorflow__tensorflow | tensorflow/python/kernel_tests/distributions/util_test.py | {"start": 21147, "end": 22716} |
class ____(test.TestCase):
@test_util.run_deprecated_v1
def testNonEmptyConstantTensor(self):
x = array_ops.zeros((2, 3, 4))
shape = du.prefer_static_shape(x)
self.assertIsInstance(shape, np.ndarray)
self.assertAllEqual(np.array([2, 3, 4]), shape)
@test_util.run_deprecated_v1
def testEmptyConstantTensor(self):
x = constant_op.constant([])
shape = du.prefer_static_shape(x)
self.assertIsInstance(shape, np.ndarray)
self.assertAllEqual(np.array([0]), shape)
@test_util.run_deprecated_v1
def testScalarTensor(self):
x = constant_op.constant(1.)
shape = du.prefer_static_shape(x)
self.assertIsInstance(shape, np.ndarray)
self.assertAllEqual(np.array([]), shape)
@test_util.run_deprecated_v1
def testDynamicShapeEndsUpBeingNonEmpty(self):
x = array_ops.placeholder(np.float64, shape=None)
shape = du.prefer_static_shape(x)
with self.cached_session():
self.assertAllEqual((2, 3), shape.eval(feed_dict={x: np.zeros((2, 3))}))
@test_util.run_deprecated_v1
def testDynamicShapeEndsUpBeingEmpty(self):
x = array_ops.placeholder(np.int32, shape=None)
shape = du.prefer_static_shape(x)
with self.cached_session():
self.assertAllEqual(np.array([0]), shape.eval(feed_dict={x: []}))
@test_util.run_deprecated_v1
def testDynamicShapeEndsUpBeingScalar(self):
x = array_ops.placeholder(np.int32, shape=None)
shape = du.prefer_static_shape(x)
with self.cached_session():
self.assertAllEqual(np.array([]), shape.eval(feed_dict={x: 1}))
| PreferStaticShapeTest |
| python | huggingface__transformers | src/transformers/models/splinter/modeling_splinter.py | {"start": 8443, "end": 9099} |
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Splinter
| SplinterIntermediate |
| python | has2k1__plotnine | tests/test_geom_smooth.py | {"start": 6025, "end": 7057} |
class ____:
p = ggplot(data, aes("x", "y")) + geom_point()
def test_lm(self):
p = self.p + stat_smooth(
method="lm", formula="y ~ np.sin(x)", fill="red", se=True
)
assert p == "lm_formula"
def test_lm_weights(self):
p = (
self.p
+ aes(weight="x.abs()")
+ stat_smooth(
method="lm", formula="y ~ np.sin(x)", fill="red", se=True
)
)
assert p == "lm_formula_weights"
def test_glm(self):
p = self.p + stat_smooth(
method="glm", formula="y ~ np.sin(x)", fill="red", se=True
)
assert p == "glm_formula"
def test_rlm(self):
p = self.p + stat_smooth(
method="rlm", formula="y ~ np.sin(x)", fill="red", se=False
)
assert p == "rlm_formula"
def test_gls(self):
p = self.p + stat_smooth(
method="gls", formula="y ~ np.sin(x)", fill="red", se=True
)
assert p == "gls_formula"
| TestFormula |
| python | huggingface__transformers | tests/models/auto/test_processor_auto.py | {"start": 20998, "end": 27685} |
class ____(unittest.TestCase):
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def setUpClass(cls):
cls._token = TOKEN
def test_push_to_hub_via_save_pretrained(self):
with TemporaryHubRepo(token=self._token) as tmp_repo:
processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(tmp_dir, repo_id=tmp_repo.repo_id, push_to_hub=True, token=self._token)
new_processor = Wav2Vec2Processor.from_pretrained(tmp_repo.repo_id)
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(v, getattr(new_processor.feature_extractor, k))
self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())
def test_push_to_hub_in_organization_via_save_pretrained(self):
with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo:
processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
tmp_dir,
repo_id=tmp_repo.repo_id,
push_to_hub=True,
token=self._token,
)
new_processor = Wav2Vec2Processor.from_pretrained(tmp_repo.repo_id)
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(v, getattr(new_processor.feature_extractor, k))
self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())
def test_push_to_hub_dynamic_processor(self):
with TemporaryHubRepo(token=self._token) as tmp_repo:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)
with tempfile.TemporaryDirectory() as tmp_dir:
vocab_file = os.path.join(tmp_dir, "vocab.txt")
with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
tokenizer = CustomTokenizer(vocab_file)
processor = CustomProcessor(feature_extractor, tokenizer)
with tempfile.TemporaryDirectory() as tmp_dir:
snapshot_download(tmp_repo.repo_id, token=self._token)
processor.save_pretrained(tmp_dir)
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map,
{
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
},
)
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
tokenizer_config = json.load(f)
self.assertDictEqual(
tokenizer_config["auto_map"],
{
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
},
)
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))
upload_folder(repo_id=tmp_repo.repo_id, folder_path=tmp_dir, token=self._token)
new_processor = AutoProcessor.from_pretrained(tmp_repo.repo_id, trust_remote_code=True)
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
def test_push_to_hub_with_chat_templates(self):
with tempfile.TemporaryDirectory() as tmp_dir:
# Extract vocab and merges from SentencePiece model
extractor = SentencePieceExtractor(SAMPLE_VOCAB_LLAMA)
vocab_ids, vocab_scores, merges = extractor.extract()
tokenizer = LlamaTokenizer(vocab=vocab_scores, merges=merges)
image_processor = SiglipImageProcessor()
chat_template = "default dummy template for testing purposes only"
processor = LlavaProcessor(
tokenizer=tokenizer, image_processor=image_processor, chat_template=chat_template
)
self.assertEqual(processor.chat_template, chat_template)
with TemporaryHubRepo(token=self._token) as tmp_repo:
processor.save_pretrained(tmp_dir, repo_id=tmp_repo.repo_id, token=self._token, push_to_hub=True)
reloaded_processor = LlavaProcessor.from_pretrained(tmp_repo.repo_id)
self.assertEqual(processor.chat_template, reloaded_processor.chat_template)
# When we save as single files, tokenizers and processors share a chat template, which means
# the reloaded tokenizer should get the chat template as well
self.assertEqual(reloaded_processor.chat_template, reloaded_processor.tokenizer.chat_template)
with TemporaryHubRepo(token=self._token) as tmp_repo:
processor.chat_template = {"default": "a", "secondary": "b"}
processor.save_pretrained(tmp_dir, repo_id=tmp_repo.repo_id, token=self._token, push_to_hub=True)
reloaded_processor = LlavaProcessor.from_pretrained(tmp_repo.repo_id)
self.assertEqual(processor.chat_template, reloaded_processor.chat_template)
# When we save as single files, tokenizers and processors share a chat template, which means
# the reloaded tokenizer should get the chat template as well
self.assertEqual(reloaded_processor.chat_template, reloaded_processor.tokenizer.chat_template)
| ProcessorPushToHubTester |
| python | fastapi__sqlmodel | docs_src/tutorial/fastapi/response_model/tutorial001_py310.py | {"start": 99, "end": 1050} |
class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: int | None = Field(default=None, index=True)
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
connect_args = {"check_same_thread": False}
engine = create_engine(sqlite_url, echo=True, connect_args=connect_args)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
app = FastAPI()
@app.on_event("startup")
def on_startup():
create_db_and_tables()
@app.post("/heroes/", response_model=Hero)
def create_hero(hero: Hero):
with Session(engine) as session:
session.add(hero)
session.commit()
session.refresh(hero)
return hero
@app.get("/heroes/", response_model=list[Hero])
def read_heroes():
with Session(engine) as session:
heroes = session.exec(select(Hero)).all()
return heroes
| Hero |
| python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0124_remove_zh_locale.py | {"start": 150, "end": 14444} |
class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0123_deprecate_old_vcs"),
]
operations = [
migrations.AlterField(
model_name="historicalproject",
name="language",
field=models.CharField(
choices=[
("aa", "Afar"),
("ab", "Abkhaz"),
("acr", "Achi"),
("af", "Afrikaans"),
("agu", "Awakateko"),
("am", "Amharic"),
("ar", "Arabic"),
("as", "Assamese"),
("ay", "Aymara"),
("az", "Azerbaijani"),
("ba", "Bashkir"),
("be", "Belarusian"),
("bg", "Bulgarian"),
("bh", "Bihari"),
("bi", "Bislama"),
("bn", "Bengali"),
("bo", "Tibetan"),
("br", "Breton"),
("ca", "Catalan"),
("caa", "Ch'orti'"),
("cac", "Chuj"),
("cab", "Garífuna"),
("cak", "Kaqchikel"),
("co", "Corsican"),
("cs", "Czech"),
("cy", "Welsh"),
("da", "Danish"),
("de", "German"),
("dz", "Dzongkha"),
("el", "Greek"),
("en", "English"),
("eo", "Esperanto"),
("es", "Spanish"),
("et", "Estonian"),
("eu", "Basque"),
("fa", "Iranian"),
("fi", "Finnish"),
("fj", "Fijian"),
("fo", "Faroese"),
("fr", "French"),
("fy", "Western Frisian"),
("ga", "Irish"),
("gd", "Scottish Gaelic"),
("gl", "Galician"),
("gn", "Guarani"),
("gu", "Gujarati"),
("ha", "Hausa"),
("hi", "Hindi"),
("he", "Hebrew"),
("hr", "Croatian"),
("hu", "Hungarian"),
("hy", "Armenian"),
("ia", "Interlingua"),
("id", "Indonesian"),
("ie", "Interlingue"),
("ik", "Inupiaq"),
("is", "Icelandic"),
("it", "Italian"),
("itz", "Itza'"),
("iu", "Inuktitut"),
("ixl", "Ixil"),
("ja", "Japanese"),
("jac", "Popti'"),
("jv", "Javanese"),
("ka", "Georgian"),
("kjb", "Q'anjob'al"),
("kek", "Q'eqchi'"),
("kk", "Kazakh"),
("kl", "Kalaallisut"),
("km", "Khmer"),
("kn", "Kannada"),
("knj", "Akateko"),
("ko", "Korean"),
("ks", "Kashmiri"),
("ku", "Kurdish"),
("ky", "Kyrgyz"),
("la", "Latin"),
("ln", "Lingala"),
("lo", "Lao"),
("lt", "Lithuanian"),
("lv", "Latvian"),
("mam", "Mam"),
("mg", "Malagasy"),
("mi", "Maori"),
("mk", "Macedonian"),
("ml", "Malayalam"),
("mn", "Mongolian"),
("mop", "Mopan"),
("mr", "Marathi"),
("ms", "Malay"),
("mt", "Maltese"),
("my", "Burmese"),
("na", "Nauru"),
("ne", "Nepali"),
("nl", "Dutch"),
("no", "Norwegian"),
("oc", "Occitan"),
("om", "Oromo"),
("or", "Oriya"),
("pa", "Panjabi"),
("pl", "Polish"),
("pnb", "Western Punjabi"),
("poc", "Poqomam"),
("poh", "Poqomchi"),
("ps", "Pashto"),
("pt", "Portuguese"),
("qu", "Quechua"),
("quc", "K'iche'"),
("qum", "Sipakapense"),
("quv", "Sakapulteko"),
("rm", "Romansh"),
("rn", "Kirundi"),
("ro", "Romanian"),
("ru", "Russian"),
("rw", "Kinyarwanda"),
("sa", "Sanskrit"),
("sd", "Sindhi"),
("sg", "Sango"),
("si", "Sinhala"),
("sk", "Slovak"),
("skr", "Saraiki"),
("sl", "Slovenian"),
("sm", "Samoan"),
("sn", "Shona"),
("so", "Somali"),
("sq", "Albanian"),
("sr", "Serbian"),
("ss", "Swati"),
("st", "Southern Sotho"),
("su", "Sudanese"),
("sv", "Swedish"),
("sw", "Swahili"),
("ta", "Tamil"),
("te", "Telugu"),
("tg", "Tajik"),
("th", "Thai"),
("ti", "Tigrinya"),
("tk", "Turkmen"),
("tl", "Tagalog"),
("tn", "Tswana"),
("to", "Tonga"),
("tr", "Turkish"),
("ts", "Tsonga"),
("tt", "Tatar"),
("ttc", "Tektiteko"),
("tzj", "Tz'utujil"),
("tw", "Twi"),
("ug", "Uyghur"),
("uk", "Ukrainian"),
("ur", "Urdu"),
("usp", "Uspanteko"),
("uz", "Uzbek"),
("vi", "Vietnamese"),
("vo", "Volapuk"),
("wo", "Wolof"),
("xh", "Xhosa"),
("xin", "Xinka"),
("yi", "Yiddish"),
("yo", "Yoruba"),
("za", "Zhuang"),
("zu", "Zulu"),
("nb-no", "Norwegian Bokmal"),
("pt-br", "Brazilian Portuguese"),
("es-mx", "Mexican Spanish"),
("uk-ua", "Ukrainian"),
("zh-cn", "Simplified Chinese"),
("zh-tw", "Traditional Chinese"),
],
default="en",
help_text="The language the project documentation is rendered in. Note: this affects your project's URL.",
max_length=20,
verbose_name="Language",
),
),
migrations.AlterField(
model_name="project",
name="language",
field=models.CharField(
choices=[
("aa", "Afar"),
("ab", "Abkhaz"),
("acr", "Achi"),
("af", "Afrikaans"),
("agu", "Awakateko"),
("am", "Amharic"),
("ar", "Arabic"),
("as", "Assamese"),
("ay", "Aymara"),
("az", "Azerbaijani"),
("ba", "Bashkir"),
("be", "Belarusian"),
("bg", "Bulgarian"),
("bh", "Bihari"),
("bi", "Bislama"),
("bn", "Bengali"),
("bo", "Tibetan"),
("br", "Breton"),
("ca", "Catalan"),
("caa", "Ch'orti'"),
("cac", "Chuj"),
("cab", "Garífuna"),
("cak", "Kaqchikel"),
("co", "Corsican"),
("cs", "Czech"),
("cy", "Welsh"),
("da", "Danish"),
("de", "German"),
("dz", "Dzongkha"),
("el", "Greek"),
("en", "English"),
("eo", "Esperanto"),
("es", "Spanish"),
("et", "Estonian"),
("eu", "Basque"),
("fa", "Iranian"),
("fi", "Finnish"),
("fj", "Fijian"),
("fo", "Faroese"),
("fr", "French"),
("fy", "Western Frisian"),
("ga", "Irish"),
("gd", "Scottish Gaelic"),
("gl", "Galician"),
("gn", "Guarani"),
("gu", "Gujarati"),
("ha", "Hausa"),
("hi", "Hindi"),
("he", "Hebrew"),
("hr", "Croatian"),
("hu", "Hungarian"),
("hy", "Armenian"),
("ia", "Interlingua"),
("id", "Indonesian"),
("ie", "Interlingue"),
("ik", "Inupiaq"),
("is", "Icelandic"),
("it", "Italian"),
("itz", "Itza'"),
("iu", "Inuktitut"),
("ixl", "Ixil"),
("ja", "Japanese"),
("jac", "Popti'"),
("jv", "Javanese"),
("ka", "Georgian"),
("kjb", "Q'anjob'al"),
("kek", "Q'eqchi'"),
("kk", "Kazakh"),
("kl", "Kalaallisut"),
("km", "Khmer"),
("kn", "Kannada"),
("knj", "Akateko"),
("ko", "Korean"),
("ks", "Kashmiri"),
("ku", "Kurdish"),
("ky", "Kyrgyz"),
("la", "Latin"),
("ln", "Lingala"),
("lo", "Lao"),
("lt", "Lithuanian"),
("lv", "Latvian"),
("mam", "Mam"),
("mg", "Malagasy"),
("mi", "Maori"),
("mk", "Macedonian"),
("ml", "Malayalam"),
("mn", "Mongolian"),
("mop", "Mopan"),
("mr", "Marathi"),
("ms", "Malay"),
("mt", "Maltese"),
("my", "Burmese"),
("na", "Nauru"),
("ne", "Nepali"),
("nl", "Dutch"),
("no", "Norwegian"),
("oc", "Occitan"),
("om", "Oromo"),
("or", "Oriya"),
("pa", "Panjabi"),
("pl", "Polish"),
("pnb", "Western Punjabi"),
("poc", "Poqomam"),
("poh", "Poqomchi"),
("ps", "Pashto"),
("pt", "Portuguese"),
("qu", "Quechua"),
("quc", "K'iche'"),
("qum", "Sipakapense"),
("quv", "Sakapulteko"),
("rm", "Romansh"),
("rn", "Kirundi"),
("ro", "Romanian"),
("ru", "Russian"),
("rw", "Kinyarwanda"),
("sa", "Sanskrit"),
("sd", "Sindhi"),
("sg", "Sango"),
("si", "Sinhala"),
("sk", "Slovak"),
("skr", "Saraiki"),
("sl", "Slovenian"),
("sm", "Samoan"),
("sn", "Shona"),
("so", "Somali"),
("sq", "Albanian"),
("sr", "Serbian"),
("ss", "Swati"),
("st", "Southern Sotho"),
("su", "Sudanese"),
("sv", "Swedish"),
("sw", "Swahili"),
("ta", "Tamil"),
("te", "Telugu"),
("tg", "Tajik"),
("th", "Thai"),
("ti", "Tigrinya"),
("tk", "Turkmen"),
("tl", "Tagalog"),
("tn", "Tswana"),
("to", "Tonga"),
("tr", "Turkish"),
("ts", "Tsonga"),
("tt", "Tatar"),
("ttc", "Tektiteko"),
("tzj", "Tz'utujil"),
("tw", "Twi"),
("ug", "Uyghur"),
("uk", "Ukrainian"),
("ur", "Urdu"),
("usp", "Uspanteko"),
("uz", "Uzbek"),
("vi", "Vietnamese"),
("vo", "Volapuk"),
("wo", "Wolof"),
("xh", "Xhosa"),
("xin", "Xinka"),
("yi", "Yiddish"),
("yo", "Yoruba"),
("za", "Zhuang"),
("zu", "Zulu"),
("nb-no", "Norwegian Bokmal"),
("pt-br", "Brazilian Portuguese"),
("es-mx", "Mexican Spanish"),
("uk-ua", "Ukrainian"),
("zh-cn", "Simplified Chinese"),
("zh-tw", "Traditional Chinese"),
],
default="en",
help_text="The language the project documentation is rendered in. Note: this affects your project's URL.",
max_length=20,
verbose_name="Language",
),
),
]
| Migration |
| python | kamyu104__LeetCode-Solutions | Python/minimum-weighted-subgraph-with-the-required-paths-ii.py | {"start": 786, "end": 2703} |
class ____(object):
def minimumWeight(self, edges, queries):
"""
:type edges: List[List[int]]
:type queries: List[List[int]]
:rtype: List[int]
"""
def iter_dfs():
lookup = [False]*len(adj)
lookup2 = [[] for _ in xrange(len(adj))]
for i, q in enumerate(queries):
for x in q:
lookup2[x].append(i)
uf = UnionFind(len(adj))
ancestor = range(len(adj))
dist = [0]*len(adj)
result = [0]*len(queries)
stk = [(1, (0,))]
while stk:
step, args = stk.pop()
if step == 1:
u = args[0]
for i in lookup2[u]:
result[i] += dist[u]
for x in queries[i]:
if lookup[x]:
result[i] -= dist[ancestor[uf.find_set(x)]]
lookup[u] = True
stk.append((2, (u, 0)))
elif step == 2:
u, i = args
if i == len(adj[u]):
continue
v, w = adj[u][i]
stk.append((2, (u, i+1)))
if lookup[v]:
continue
dist[v] = dist[u]+w
stk.append((3, (v, u)))
stk.append((1, (v, u)))
elif step == 3:
v, u = args
uf.union_set(v, u)
ancestor[uf.find_set(u)] = u
return result
adj = [[] for _ in xrange(len(edges)+1)]
for u, v, w in edges:
adj[u].append((v, w))
adj[v].append((u, w))
return iter_dfs()
# Time: O(n + q)
# Space: O(n + q)
# dfs, Tarjan's Offline LCA Algorithm
| Solution |
| python | networkx__networkx | networkx/exception.py | {"start": 464, "end": 551} |
class ____(Exception):
"""Base class for exceptions in NetworkX."""
| NetworkXException |
| python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_assorted_poly.py | {"start": 72935, "end": 78939} |
class ____(AssertsCompiledSQL, fixtures.TestBase):
"""tests for #8168 which was fixed by #8456"""
__dialect__ = "default"
@testing.fixture
def mapping(self, decl_base):
Base = decl_base
def go(scenario, use_poly, use_poly_on_retailer):
class Customer(Base):
__tablename__ = "customer"
id = Column(Integer, primary_key=True)
type = Column(String(20))
__mapper_args__ = {
"polymorphic_on": "type",
"polymorphic_identity": "customer",
}
class Store(Customer):
__tablename__ = "store"
id = Column(
Integer, ForeignKey("customer.id"), primary_key=True
)
retailer_id = Column(Integer, ForeignKey("retailer.id"))
retailer = relationship(
"Retailer",
back_populates="stores",
foreign_keys=[retailer_id],
)
__mapper_args__ = {
"polymorphic_identity": "store",
"polymorphic_load": "inline" if use_poly else None,
}
class Retailer(Customer):
__tablename__ = "retailer"
id = Column(
Integer, ForeignKey("customer.id"), primary_key=True
)
stores = relationship(
"Store",
back_populates="retailer",
foreign_keys=[Store.retailer_id],
)
if scenario.mapped_cls:
store_tgt = corr_except = Store
elif scenario.table:
corr_except = Store.__table__
store_tgt = Store.__table__.c
elif scenario.table_alias:
corr_except = Store.__table__.alias()
store_tgt = corr_except.c
else:
scenario.fail()
store_count = column_property(
select(func.count(store_tgt.id))
.where(store_tgt.retailer_id == id)
.correlate_except(corr_except)
.scalar_subquery()
)
__mapper_args__ = {
"polymorphic_identity": "retailer",
"polymorphic_load": (
"inline" if use_poly_on_retailer else None
),
}
return Customer, Store, Retailer
yield go
@testing.variation("scenario", ["mapped_cls", "table", "table_alias"])
@testing.variation("use_poly", [True, False])
@testing.variation("use_poly_on_retailer", [True, False])
def test_select_attr_only(
self, scenario, use_poly, use_poly_on_retailer, mapping
):
Customer, Store, Retailer = mapping(
scenario, use_poly, use_poly_on_retailer
)
if scenario.mapped_cls:
self.assert_compile(
select(Retailer.store_count).select_from(Retailer),
"SELECT (SELECT count(store.id) AS count_1 "
"FROM customer JOIN store ON customer.id = store.id "
"WHERE store.retailer_id = retailer.id) AS anon_1 "
"FROM customer JOIN retailer ON customer.id = retailer.id",
)
elif scenario.table:
self.assert_compile(
select(Retailer.store_count).select_from(Retailer),
"SELECT (SELECT count(store.id) AS count_1 "
"FROM store "
"WHERE store.retailer_id = retailer.id) AS anon_1 "
"FROM customer JOIN retailer ON customer.id = retailer.id",
)
elif scenario.table_alias:
self.assert_compile(
select(Retailer.store_count).select_from(Retailer),
"SELECT (SELECT count(store_1.id) AS count_1 FROM store "
"AS store_1 "
"WHERE store_1.retailer_id = retailer.id) AS anon_1 "
"FROM customer JOIN retailer ON customer.id = retailer.id",
)
else:
scenario.fail()
@testing.variation("scenario", ["mapped_cls", "table", "table_alias"])
@testing.variation("use_poly", [True, False])
@testing.variation("use_poly_on_retailer", [True, False])
def test_select_cls(
self, scenario, mapping, use_poly, use_poly_on_retailer
):
Customer, Store, Retailer = mapping(
scenario, use_poly, use_poly_on_retailer
)
if scenario.mapped_cls:
self.assert_compile(
select(Retailer),
"SELECT (SELECT count(store.id) AS count_1 FROM customer "
"JOIN store ON customer.id = store.id "
"WHERE store.retailer_id = retailer.id) AS anon_1, "
"retailer.id, customer.id AS id_1, customer.type "
"FROM customer JOIN retailer ON customer.id = retailer.id",
)
elif scenario.table:
self.assert_compile(
select(Retailer),
"SELECT (SELECT count(store.id) AS count_1 FROM store "
"WHERE store.retailer_id = retailer.id) AS anon_1, "
"retailer.id, customer.id AS id_1, customer.type "
"FROM customer JOIN retailer ON customer.id = retailer.id",
)
elif scenario.table_alias:
self.assert_compile(
select(Retailer),
"SELECT (SELECT count(store_1.id) AS count_1 "
"FROM store AS store_1 WHERE store_1.retailer_id = "
"retailer.id) AS anon_1, retailer.id, customer.id AS id_1, "
"customer.type "
"FROM customer JOIN retailer ON customer.id = retailer.id",
)
else:
scenario.fail()
|
Issue8168Test
|
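The Issue8168Test excerpt above builds a column_property from a correlated scalar subquery and checks how it compiles under joined inheritance. Below is a minimal standalone sketch of the same correlated-count pattern, assuming SQLAlchemy 1.4+/2.x; the Parent/Child models and table names are illustrative, not taken from the test.

# Sketch of a correlated-count column_property (hypothetical Parent/Child models).
from sqlalchemy import Column, ForeignKey, Integer, create_engine, func, select
from sqlalchemy.orm import Session, column_property, declarative_base

Base = declarative_base()

class Child(Base):
    __tablename__ = "child"
    id = Column(Integer, primary_key=True)
    parent_id = Column(Integer, ForeignKey("parent.id"))

class Parent(Base):
    __tablename__ = "parent"
    id = Column(Integer, primary_key=True)
    # Correlated scalar subquery: count Child rows per Parent row, never
    # correlating Child itself out of the subquery.
    child_count = column_property(
        select(func.count(Child.id))
        .where(Child.parent_id == id)
        .correlate_except(Child)
        .scalar_subquery()
    )

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add_all([Parent(id=1), Child(id=1, parent_id=1), Child(id=2, parent_id=1)])
    session.commit()
    assert session.get(Parent, 1).child_count == 2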
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/sqltypes.py
|
{
"start": 120997,
"end": 121182
}
|
class ____(BigInteger):
"""The SQL BIGINT type.
.. seealso::
:class:`_types.BigInteger` - documentation for the base type.
"""
__visit_name__ = "BIGINT"
|
BIGINT
|
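As a small illustration of the BIGINT type above, the sketch below declares a column with it; it assumes SQLAlchemy is installed and the table name is made up.

# Sketch: a table column using the BIGINT type shown above.
from sqlalchemy import BIGINT, Column, MetaData, Table, create_engine

metadata = MetaData()
events = Table("events", metadata, Column("id", BIGINT, primary_key=True))

# SQLite accepts the BIGINT affinity; other dialects render their native type.
engine = create_engine("sqlite://")
metadata.create_all(engine)
print(events.c.id.type)  # BIGINT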
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/asset_selection.py
|
{
"start": 43391,
"end": 44027
}
|
class ____(AssetSelection):
"""Used to represent a UI asset selection by column tag. This should not be resolved against
an in-process asset graph.
"""
key: str
value: str
def resolve_inner(
self, asset_graph: BaseAssetGraph, allow_missing: bool
) -> AbstractSet[AssetKey]:
"""This should not be invoked in user code."""
raise NotImplementedError
def to_selection_str(self) -> str:
if self.value:
return f'column_tag:"{self.key}"="{self.value}"'
else:
return f'column_tag:"{self.key}"'
@whitelist_for_serdes
@record
|
ColumnTagAssetSelection
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-powerbi/dagster_powerbi/resource.py
|
{
"start": 16716,
"end": 17940
}
|
class ____(StateBackedDefinitionsLoader[PowerBIWorkspaceData]):
workspace: PowerBIWorkspace
translator: DagsterPowerBITranslator
use_workspace_scan: bool
@property
def defs_key(self) -> str:
return f"{POWER_BI_RECONSTRUCTION_METADATA_KEY_PREFIX}/{self.workspace.workspace_id}"
def fetch_state(self) -> PowerBIWorkspaceData:
with self.workspace.process_config_and_initialize_cm() as initialized_workspace:
return initialized_workspace.fetch_powerbi_workspace_data(
use_workspace_scan=self.use_workspace_scan
)
def defs_from_state(self, state: PowerBIWorkspaceData) -> Definitions:
all_external_data = [
*state.dashboards_by_id.values(),
*state.reports_by_id.values(),
*state.semantic_models_by_id.values(),
]
all_external_asset_specs = [
self.translator.get_asset_spec(
PowerBITranslatorData(
content_data=content,
workspace_data=state,
)
)
for content in all_external_data
]
return Definitions(assets=[*all_external_asset_specs])
|
PowerBIWorkspaceDefsLoader
|
python
|
numba__numba
|
numba/core/datamodel/models.py
|
{
"start": 33869,
"end": 34202
}
|
class ____(StructModel):
def __init__(self, dmm, fe_type):
members = [('index', types.EphemeralPointer(types.intp)),
('tuple', fe_type.container,)]
super(UniTupleIter, self).__init__(dmm, fe_type, members)
@register_default(types.misc.SliceLiteral)
@register_default(types.SliceType)
|
UniTupleIter
|
python
|
pydata__xarray
|
xarray/namedarray/_typing.py
|
{
"start": 565,
"end": 1179
}
|
class ____(Enum):
token: Final = 0
_default = Default.token
# https://stackoverflow.com/questions/74633074/how-to-type-hint-a-generic-numpy-array
_T_co = TypeVar("_T_co", covariant=True)
_dtype = np.dtype
_DType = TypeVar("_DType", bound=np.dtype[Any])
_DType_co = TypeVar("_DType_co", covariant=True, bound=np.dtype[Any])
# A subset of `npt.DTypeLike` that can be parametrized w.r.t. `np.generic`
_ScalarType = TypeVar("_ScalarType", bound=np.generic)
_ScalarType_co = TypeVar("_ScalarType_co", bound=np.generic, covariant=True)
# A protocol for anything with the dtype attribute
@runtime_checkable
|
Default
|
python
|
faif__python-patterns
|
patterns/structural/mvc.py
|
{
"start": 246,
"end": 664
}
|
class ____(ABC):
"""The Model is the data layer of the application."""
@abstractmethod
def __iter__(self) -> Any:
pass
@abstractmethod
def get(self, item: str) -> dict:
"""Returns an object with a .items() call method
that iterates over key,value pairs of its information."""
pass
@property
@abstractmethod
def item_type(self) -> str:
pass
|
Model
|
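The Model ABC above requires __iter__, get, and an item_type property. A minimal concrete subclass could look like the sketch below; it assumes the Model class from the excerpt is in scope, and the product data is made up for illustration.

# Sketch: a concrete Model satisfying the ABC above.
class ProductModel(Model):
    products = {
        "milk": {"price": 1.50, "quantity": 10},
        "eggs": {"price": 0.20, "quantity": 100},
    }

    # A plain class attribute satisfies the abstract item_type property.
    item_type = "product"

    def __iter__(self):
        yield from self.products

    def get(self, item: str) -> dict:
        try:
            return self.products[item]
        except KeyError as err:
            raise KeyError(f"{item} not in the model's items") from err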
python
|
tornadoweb__tornado
|
tornado/template.py
|
{
"start": 21795,
"end": 22661
}
|
class ____(_Node):
def __init__(self, method: str, line: int, body: _Node) -> None:
self.method = method
self.line = line
self.body = body
def each_child(self) -> Iterable["_Node"]:
return (self.body,)
def generate(self, writer: "_CodeWriter") -> None:
method_name = "_tt_apply%d" % writer.apply_counter
writer.apply_counter += 1
writer.write_line("def %s():" % method_name, self.line)
with writer.indent():
writer.write_line("_tt_buffer = []", self.line)
writer.write_line("_tt_append = _tt_buffer.append", self.line)
self.body.generate(writer)
writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line)
writer.write_line(
f"_tt_append(_tt_utf8({self.method}({method_name}())))", self.line
)
|
_ApplyBlock
|
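The _ApplyBlock node above is what backs the {% apply %} template directive: the block body is rendered into a buffer and then passed through the named function. A small usage sketch, assuming tornado is installed; the "shout" helper is made up and passed into the template namespace at render time (the applied function receives the rendered body as utf-8 bytes).

# Sketch: the {% apply %} directive compiled by _ApplyBlock.
from tornado.template import Template

tmpl = Template("{% apply shout %}hello from tornado{% end %}")
print(tmpl.generate(shout=lambda body: body.upper()).decode())  # HELLO FROM TORNADO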
python
|
ansible__ansible
|
lib/ansible/_internal/_ssh/_ssh_agent.py
|
{
"start": 7684,
"end": 10326
}
|
class ____(Msg):
@staticmethod
def from_private_key(private_key: CryptoPrivateKey) -> PrivateKeyMsg:
match private_key:
case RSAPrivateKey():
rsa_pn: RSAPrivateNumbers = private_key.private_numbers()
return RSAPrivateKeyMsg(
KeyAlgo.RSA,
mpint(rsa_pn.public_numbers.n),
mpint(rsa_pn.public_numbers.e),
mpint(rsa_pn.d),
mpint(rsa_pn.iqmp),
mpint(rsa_pn.p),
mpint(rsa_pn.q),
)
case DSAPrivateKey():
dsa_pn: DSAPrivateNumbers = private_key.private_numbers()
return DSAPrivateKeyMsg(
KeyAlgo.DSA,
mpint(dsa_pn.public_numbers.parameter_numbers.p),
mpint(dsa_pn.public_numbers.parameter_numbers.q),
mpint(dsa_pn.public_numbers.parameter_numbers.g),
mpint(dsa_pn.public_numbers.y),
mpint(dsa_pn.x),
)
case EllipticCurvePrivateKey():
ecdsa_pn: EllipticCurvePrivateNumbers = private_key.private_numbers()
key_size = private_key.key_size
return EcdsaPrivateKeyMsg(
getattr(KeyAlgo, f'ECDSA{key_size}'),
unicode_string(f'nistp{key_size}'),
binary_string(
private_key.public_key().public_bytes(
encoding=serialization.Encoding.X962,
format=serialization.PublicFormat.UncompressedPoint,
)
),
mpint(ecdsa_pn.private_value),
)
case Ed25519PrivateKey():
public_bytes = private_key.public_key().public_bytes(
encoding=serialization.Encoding.Raw,
format=serialization.PublicFormat.Raw,
)
private_bytes = private_key.private_bytes(
encoding=serialization.Encoding.Raw,
format=serialization.PrivateFormat.Raw,
encryption_algorithm=serialization.NoEncryption(),
)
return Ed25519PrivateKeyMsg(
KeyAlgo.ED25519,
binary_string(public_bytes),
binary_string(private_bytes + public_bytes),
)
case _:
raise NotImplementedError(private_key)
@dataclasses.dataclass(order=True, slots=True)
|
PrivateKeyMsg
|
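The Ed25519 branch above serializes both halves of the key pair to raw bytes before packing them into the agent message. A standalone sketch of that serialization step, assuming the cryptography package is installed:

# Sketch: raw-bytes serialization mirroring the Ed25519PrivateKey branch above.
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey

key = Ed25519PrivateKey.generate()
public_bytes = key.public_key().public_bytes(
    encoding=serialization.Encoding.Raw,
    format=serialization.PublicFormat.Raw,
)
private_bytes = key.private_bytes(
    encoding=serialization.Encoding.Raw,
    format=serialization.PrivateFormat.Raw,
    encryption_algorithm=serialization.NoEncryption(),
)
# The agent message in the excerpt concatenates private + public bytes.
assert len(public_bytes) == 32 and len(private_bytes) == 32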
python
|
pytorch__pytorch
|
torch/storage.py
|
{
"start": 51308,
"end": 52396
}
|
class ____(TypedStorage, metaclass=_LegacyStorageMeta):
@classmethod
def _new_shared(cls, size): # type: ignore[override]
"""Create a new storage in shared memory with the same data type."""
untyped_storage = torch.UntypedStorage._new_shared(size * cls()._element_size())
return cls(wrap_storage=untyped_storage)
@classmethod
def _release_ipc_counter(cls, *args, **kwargs):
return torch.UntypedStorage._release_ipc_counter_cuda(*args, **kwargs)
@classmethod
def _new_shared_filename(cls, manager, obj, size):
bytes_size = size * torch._utils._element_size(cls.dtype)
return cls(
wrap_storage=torch.UntypedStorage._new_shared_filename_cpu(
manager, obj, bytes_size
)
)
def _get_dtype_from_pickle_storage_type(pickle_storage_type: str):
try:
return _storage_type_to_dtype_map()[pickle_storage_type]
except KeyError as e:
raise KeyError(
f'pickle storage type "{pickle_storage_type}" is not recognized'
) from e
|
_LegacyStorage
|
python
|
joke2k__faker
|
tests/providers/test_automotive.py
|
{
"start": 12161,
"end": 12767
}
|
class ____(_SimpleAutomotiveTestMixin):
"""Test tr_TR automotive provider methods"""
license_plate_pattern: Pattern = re.compile(
r"\d{2} [A-Z] \d{4}|"
r"\d{2} [A-Z] \d{5}|"
r"\d{2} [A-Z]{2} \d{3}|"
r"\d{2} [A-Z]{2} \d{4}|"
r"\d{2} [A-Z]{3} \d{2}|"
r"\d{2} [A-Z]{3} \d{3}",
)
def perform_extra_checks(self, license_plate, match):
[city_code, letters, _] = license_plate.split(" ")
assert int(city_code) in range(1, 82)
assert all(letter in TrTrAutomotiveProvider.ascii_uppercase_turkish for letter in letters)
|
TestTrTr
|
python
|
vyperlang__vyper
|
vyper/ast/nodes.py
|
{
"start": 33180,
"end": 33491
}
|
class ____(Operator):
__slots__ = ()
_description = "modulus"
_pretty = "%"
def _op(self, left, right):
if not right:
raise ZeroDivisionException("Modulo by zero")
value = abs(left) % abs(right)
if left < 0:
value = -value
return value
|
Mod
|
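The _op above implements truncated modulo, where the result takes the sign of the left operand; this differs from Python's floored %. A standalone sketch of the same rule, outside of Vyper:

# Truncated modulo as in the Mod node above: the sign follows the left operand.
def truncated_mod(left: int, right: int) -> int:
    if not right:
        raise ZeroDivisionError("Modulo by zero")
    value = abs(left) % abs(right)
    return -value if left < 0 else value

assert truncated_mod(7, 3) == 1
assert truncated_mod(-7, 3) == -1   # Python's -7 % 3 would be 2
assert truncated_mod(7, -3) == 1    # Python's 7 % -3 would be -2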
python
|
google__flatbuffers
|
tests/py_test.py
|
{
"start": 4044,
"end": 5193
}
|
class ____(unittest.TestCase):
def test_wire_format(self):
# Verify that using the generated Python code builds a buffer without
# returning errors, and is interpreted correctly, for size prefixed
# representation and regular:
for sizePrefix in [True, False]:
for file_identifier in [None, b'MONS']:
gen_buf, gen_off = make_monster_from_generated_code(
sizePrefix=sizePrefix, file_identifier=file_identifier
)
CheckReadBuffer(
gen_buf,
gen_off,
sizePrefix=sizePrefix,
file_identifier=file_identifier,
)
# Verify that the canonical flatbuffer file is readable by the
# generated Python code. Note that context managers are not part of
# Python 2.5, so we use the simpler open/close methods here:
f = open('monsterdata_test.mon', 'rb')
canonicalWireData = f.read()
f.close()
CheckReadBuffer(bytearray(canonicalWireData), 0, file_identifier=b'MONS')
# Write the generated buffer out to a file:
f = open('monsterdata_python_wire.mon', 'wb')
f.write(gen_buf[gen_off:])
f.close()
|
TestWireFormat
|
python
|
PrefectHQ__prefect
|
src/prefect/server/schemas/filters.py
|
{
"start": 57384,
"end": 57925
}
|
class ____(PrefectFilterBaseModel):
"""Filter by `BlockSchema.block_type_id`."""
any_: Optional[list[UUID]] = Field(
default=None, description="A list of block type ids to include"
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.any_ is not None:
filters.append(db.BlockSchema.block_type_id.in_(self.any_))
return filters
|
BlockSchemaFilterBlockTypeId
|
python
|
getsentry__sentry
|
tests/sentry/tasks/test_post_process.py
|
{
"start": 87948,
"end": 89734
}
|
class ____(BasePostProgressGroupMixin):
def assert_organization_key(self, organization: Organization, exists: bool) -> None:
key = get_organization_bucket_key(organization)
cluster = get_cluster()
assert exists == cluster.sismember(key, str(organization.id))
def test_uptime_detection_feature_url(self) -> None:
event = self.create_event(
data={"request": {"url": "http://sentry.io"}},
project_id=self.project.id,
)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
self.assert_organization_key(self.organization, True)
def test_uptime_detection_feature_no_url(self) -> None:
event = self.create_event(
data={},
project_id=self.project.id,
)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
self.assert_organization_key(self.organization, False)
@override_options({"uptime.automatic-hostname-detection": False})
def test_uptime_detection_no_option(self) -> None:
event = self.create_event(
data={"request": {"url": "http://sentry.io"}},
project_id=self.project.id,
)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
self.assert_organization_key(self.organization, False)
@patch("sentry.analytics.record")
@patch("sentry.utils.metrics.incr")
@patch("sentry.utils.metrics.distribution")
|
DetectBaseUrlsForUptimeTestMixin
|
python
|
django__django
|
django/contrib/postgres/fields/hstore.py
|
{
"start": 2529,
"end": 2875
}
|
class ____(Transform):
output_field = TextField()
def __init__(self, key_name, *args, **kwargs):
super().__init__(*args, **kwargs)
self.key_name = key_name
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
return "(%s -> %%s)" % lhs, (*params, self.key_name)
|
KeyTransform
|
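KeyTransform above compiles a key lookup into "(column -> 'key')" SQL. The sketch below shows the ORM-level lookup that uses it; it assumes a configured Django project with django.contrib.postgres and a PostgreSQL database, and the Dog model is illustrative.

# Sketch: the hstore key lookup that KeyTransform compiles (hypothetical model).
from django.contrib.postgres.fields import HStoreField
from django.db import models

class Dog(models.Model):
    name = models.CharField(max_length=200)
    data = HStoreField()

# Dog.objects.filter(data__breed="collie") filters on the hstore key "breed",
# producing SQL along the lines of: WHERE (data -> 'breed') = 'collie'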
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_escapes03.py
|
{
"start": 315,
"end": 1059
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("escapes03.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file.Check encoding of rich strings."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
bold = workbook.add_format({"bold": 1})
italic = workbook.add_format({"italic": 1})
worksheet.write("A1", "Foo", bold)
worksheet.write("A2", "Bar", italic)
worksheet.write_rich_string("A3", "a", bold, """b"<>'c""", "defg")
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
encode__starlette
|
starlette/endpoints.py
|
{
"start": 506,
"end": 2153
}
|
class ____:
def __init__(self, scope: Scope, receive: Receive, send: Send) -> None:
assert scope["type"] == "http"
self.scope = scope
self.receive = receive
self.send = send
self._allowed_methods = [
method
for method in ("GET", "HEAD", "POST", "PUT", "PATCH", "DELETE", "OPTIONS")
if getattr(self, method.lower(), None) is not None
]
def __await__(self) -> Generator[Any, None, None]:
return self.dispatch().__await__()
async def dispatch(self) -> None:
request = Request(self.scope, receive=self.receive)
handler_name = "get" if request.method == "HEAD" and not hasattr(self, "head") else request.method.lower()
handler: Callable[[Request], Any] = getattr(self, handler_name, self.method_not_allowed)
is_async = is_async_callable(handler)
if is_async:
response = await handler(request)
else:
response = await run_in_threadpool(handler, request)
await response(self.scope, self.receive, self.send)
async def method_not_allowed(self, request: Request) -> Response:
# If we're running inside a starlette application then raise an
# exception, so that the configurable exception handler can deal with
# returning the response. For plain ASGI apps, just return the response.
headers = {"Allow": ", ".join(self._allowed_methods)}
if "app" in self.scope:
raise HTTPException(status_code=405, headers=headers)
return PlainTextResponse("Method Not Allowed", status_code=405, headers=headers)
|
HTTPEndpoint
|
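A concrete subclass of the HTTPEndpoint above only needs to define the HTTP methods it supports; anything else falls through to method_not_allowed with an Allow header. A minimal sketch, with an illustrative route path and class name, assuming starlette is installed:

# Sketch: an HTTPEndpoint subclass wired into a Starlette app.
from starlette.applications import Starlette
from starlette.endpoints import HTTPEndpoint
from starlette.responses import PlainTextResponse
from starlette.routing import Route

class Hello(HTTPEndpoint):
    async def get(self, request):
        return PlainTextResponse("hello")
    # No post/put/... defined, so those methods return 405 with an Allow header.

app = Starlette(routes=[Route("/hello", Hello)])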
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 9573,
"end": 9782
}
|
class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("DISMISSED", "UNVIEWED", "VIEWED")
Float = sgqlc.types.Float
|
FileViewedState
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/asset_selection.py
|
{
"start": 47326,
"end": 48046
}
|
class ____(AssetSelection):
selected_key_prefixes: Sequence[Sequence[str]]
include_sources: bool
def resolve_inner(
self, asset_graph: BaseAssetGraph, allow_missing: bool
) -> AbstractSet[AssetKey]:
base_set = (
asset_graph.get_all_asset_keys()
if self.include_sources
else asset_graph.materializable_asset_keys
)
return {
key
for key in base_set
if any(key.has_prefix(prefix) for prefix in self.selected_key_prefixes)
}
def to_serializable_asset_selection(self, asset_graph: BaseAssetGraph) -> "AssetSelection":
return self
@whitelist_for_serdes
@record
|
KeyPrefixesAssetSelection
|
python
|
google__jax
|
jax/experimental/jax2tf/call_tf.py
|
{
"start": 16180,
"end": 16517
}
|
class ____(effects.Effect):
__str__ = lambda _: "CallTfEffect"
call_tf_effect = CallTfEffect()
effects.lowerable_effects.add_type(CallTfEffect)
effects.control_flow_allowed_effects.add_type(CallTfEffect)
effects.remat_allowed_effects.add_type(CallTfEffect)
effects.custom_derivatives_allowed_effects.add_type(CallTfEffect)
|
CallTfEffect
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 154885,
"end": 155469
}
|
class ____(sgqlc.types.Input):
"""Images attached to the check run output displayed in the GitHub
pull request UI.
"""
__schema__ = github_schema
__field_names__ = ("alt", "image_url", "caption")
alt = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="alt")
"""The alternative text for the image."""
image_url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="imageUrl")
"""The full URL of the image."""
caption = sgqlc.types.Field(String, graphql_name="caption")
"""A short image description."""
|
CheckRunOutputImage
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster_tests/utils_tests/test_dataloader.py
|
{
"start": 388,
"end": 1025
}
|
class ____:
key: str
batch_keys: list[str]
@staticmethod
async def gen(context: Context, key: str) -> "Thing":
return await context.loader.load(key)
async def gen_other_thing(self, context: Context):
return await context.loader.load(f"{self.key}_other")
async def gen_other_other_other_thing(self, context: Context):
other = await self.gen_other_thing(context)
other_other = await other.gen_other_thing(context)
return await other_other.gen_other_thing(context)
async def batch_load_fn(keys: list[str]):
return [Thing(key=key, batch_keys=keys) for key in keys]
|
Thing
|
python
|
pydantic__pydantic
|
pydantic-core/python/pydantic_core/core_schema.py
|
{
"start": 82924,
"end": 86529
}
|
class ____(TypedDict, total=False):
type: Required[Literal['function-plain']]
function: Required[ValidationFunction]
ref: str
json_schema_input_schema: CoreSchema
metadata: dict[str, Any]
serialization: SerSchema
def no_info_plain_validator_function(
function: NoInfoValidatorFunction,
*,
ref: str | None = None,
json_schema_input_schema: CoreSchema | None = None,
metadata: dict[str, Any] | None = None,
serialization: SerSchema | None = None,
) -> PlainValidatorFunctionSchema:
"""
Returns a schema that uses the provided function for validation, no `info` argument is passed, e.g.:
```py
from pydantic_core import SchemaValidator, core_schema
def fn(v: str) -> str:
assert 'hello' in v
return v + 'world'
schema = core_schema.no_info_plain_validator_function(function=fn)
v = SchemaValidator(schema)
assert v.validate_python('hello ') == 'hello world'
```
Args:
function: The validator function to call
ref: optional unique identifier of the schema, used to reference the schema in other places
json_schema_input_schema: The core schema to be used to generate the corresponding JSON Schema input type
metadata: Any other information you want to include with the schema, not used by pydantic-core
serialization: Custom serialization schema
"""
return _dict_not_none(
type='function-plain',
function={'type': 'no-info', 'function': function},
ref=ref,
json_schema_input_schema=json_schema_input_schema,
metadata=metadata,
serialization=serialization,
)
def with_info_plain_validator_function(
function: WithInfoValidatorFunction,
*,
field_name: str | None = None,
ref: str | None = None,
json_schema_input_schema: CoreSchema | None = None,
metadata: dict[str, Any] | None = None,
serialization: SerSchema | None = None,
) -> PlainValidatorFunctionSchema:
"""
Returns a schema that uses the provided function for validation, an `info` argument is passed, e.g.:
```py
from pydantic_core import SchemaValidator, core_schema
def fn(v: str, info: core_schema.ValidationInfo) -> str:
assert 'hello' in v
return v + 'world'
schema = core_schema.with_info_plain_validator_function(function=fn)
v = SchemaValidator(schema)
assert v.validate_python('hello ') == 'hello world'
```
Args:
function: The validator function to call
field_name: The name of the field this validator is applied to, if any (deprecated)
ref: optional unique identifier of the schema, used to reference the schema in other places
json_schema_input_schema: The core schema to be used to generate the corresponding JSON Schema input type
metadata: Any other information you want to include with the schema, not used by pydantic-core
serialization: Custom serialization schema
"""
if field_name is not None:
warnings.warn(
'The `field_name` argument on `with_info_plain_validator_function` is deprecated, it will be passed to the function through `ValidationState` instead.',
DeprecationWarning,
stacklevel=2,
)
return _dict_not_none(
type='function-plain',
function=_dict_not_none(type='with-info', function=function, field_name=field_name),
ref=ref,
json_schema_input_schema=json_schema_input_schema,
metadata=metadata,
serialization=serialization,
)
|
PlainValidatorFunctionSchema
|
python
|
getsentry__sentry
|
tests/sentry/core/endpoints/test_organization_member_details.py
|
{
"start": 1497,
"end": 6939
}
|
class ____(OrganizationMemberTestBase):
def test_me(self) -> None:
response = self.get_success_response(self.organization.slug, "me")
assert response.data["role"] == "owner"
assert response.data["orgRole"] == "owner"
assert response.data["user"]["id"] == str(self.user.id)
assert response.data["email"] == self.user.email
def test_get_by_id(self) -> None:
user = self.create_user("dummy@example.com")
member = OrganizationMember.objects.create(
organization=self.organization, user_id=user.id, role="member"
)
self.login_as(user)
response = self.get_success_response(self.organization.slug, member.id)
assert response.data["role"] == "member"
assert response.data["orgRole"] == "member"
assert response.data["id"] == str(member.id)
def test_get_by_garbage(self) -> None:
self.get_error_response(self.organization.slug, "trash", status_code=404)
def test_cannot_get_unapproved_invite(self) -> None:
join_request = self.create_member(
organization=self.organization,
email="test@gmail.com",
invite_status=InviteStatus.REQUESTED_TO_JOIN.value,
)
invite_request = self.create_member(
organization=self.organization,
email="test2@gmail.com",
invite_status=InviteStatus.REQUESTED_TO_BE_INVITED.value,
)
self.get_error_response(self.organization.slug, join_request.id, status_code=404)
self.get_error_response(self.organization.slug, invite_request.id, status_code=404)
def test_invite_link_does_not_exist(self) -> None:
pending_om = self.create_member(
user=None,
email="bar@example.com",
organization=self.organization,
role="member",
teams=[],
)
response = self.get_success_response(self.organization.slug, pending_om.id)
assert "invite_link" not in response.data
def test_member_cannot_get_invite_link(self) -> None:
pending_om = self.create_member(
user=None,
email="bar@example.com",
organization=self.organization,
role="member",
teams=[],
)
member = self.create_user("baz@example.com")
self.create_member(organization=self.organization, user=member, role="member")
self.login_as(member)
response = self.get_success_response(self.organization.slug, pending_om.id)
assert "invite_link" not in response.data
def test_get_member_list_teams(self) -> None:
team = self.create_team(organization=self.organization, name="Team")
member = self.create_user("baz@example.com")
member_om = self.create_member(
organization=self.organization, user=member, role="member", teams=[team]
)
response = self.get_success_response(self.organization.slug, member_om.id)
assert team.slug in response.data["teams"]
assert response.data["teamRoles"][0]["teamSlug"] == team.slug
assert response.data["teamRoles"][0]["role"] is None
def test_lists_organization_roles(self) -> None:
response = self.get_success_response(self.organization.slug, "me")
assert response.data["roles"] == response.data["orgRoleList"]
role_ids = [role["id"] for role in response.data["orgRoleList"]]
assert role_ids == ["member", "admin", "manager", "owner"]
@with_feature("organizations:team-roles")
def test_hides_retired_organization_roles(self) -> None:
"""
Note: Admin will be hidden after team-roles EA.
"""
response = self.get_success_response(self.organization.slug, "me")
assert response.data["roles"] == response.data["orgRoleList"]
role_ids = [role["id"] for role in response.data["orgRoleList"]]
assert role_ids == ["member", "admin", "manager", "owner"]
def test_lists_team_roles(self) -> None:
response = self.get_success_response(self.organization.slug, "me")
role_ids = [role["id"] for role in response.data["teamRoleList"]]
assert role_ids == ["contributor", "admin"]
def test_does_not_include_secondary_emails(self) -> None:
# Create a user with multiple email addresses
user = self.create_user("primary@example.com", username="multi_email_user")
self.create_useremail(user, "secondary1@example.com")
self.create_useremail(user, "secondary2@example.com")
# Add user to organization
member = self.create_member(organization=self.organization, user=user, role="member")
response = self.get_success_response(self.organization.slug, member.id)
# Check that only primary email is present and no other email addresses are exposed
assert response.data["email"] == "primary@example.com"
assert "emails" not in response.data["user"]
assert "emails" not in response.data.get("serializedUser", {})
def test_does_not_serialize_placeholder_member(self) -> None:
invite = self.create_member_invite(organization=self.organization)
placeholder_om = invite.organization_member
response = self.get_error_response(self.organization.slug, placeholder_om.id)
assert response.data["detail"] == "The requested resource does not exist"
|
GetOrganizationMemberTest
|
python
|
getsentry__sentry
|
src/sentry/snuba/metrics/mqb_query_transformer.py
|
{
"start": 878,
"end": 18459
}
|
class ____(Exception):
pass
def _get_derived_op_metric_field_from_snuba_function(function: Function):
if len(function.parameters) == 0 or not isinstance(function.parameters[0], Column):
raise MQBQueryTransformationException(
"The first parameter of a function should be a column of the metric MRI"
)
default_args_for_snql_func = {"aggregate_filter", "org_id", "alias", "use_case_id"}
metric_field_params = {}
function_params = function.parameters[1:]
snql_func_args = inspect.signature(DERIVED_OPS[function.function].snql_func).parameters.keys()
for arg in snql_func_args:
if arg in default_args_for_snql_func:
continue
try:
metric_field_params[arg] = function_params.pop(0)
except IndexError:
raise MQBQueryTransformationException(
f"Too few function parameters are provided. The arguments required for function "
f"{function.function} are "
f"{[arg for arg in snql_func_args if arg not in default_args_for_snql_func]}"
)
return MetricField(
op=function.function,
metric_mri=function.parameters[0].name,
params=metric_field_params,
alias=function.alias,
)
def _transform_select(query_select):
select = []
for select_field in query_select:
if isinstance(select_field, (Column, AliasedExpression)):
if isinstance(select_field, AliasedExpression):
column_field = select_field.exp
column_alias = select_field.alias
else:
column_field = select_field
column_alias = None
try:
select.append(
MetricField(op=None, metric_mri=column_field.name, alias=column_alias)
)
except InvalidParams as e:
raise MQBQueryTransformationException(e)
elif isinstance(select_field, Function):
if select_field.function in DERIVED_OPS:
select.append(_get_derived_op_metric_field_from_snuba_function(select_field))
else:
if select_field.function not in OPERATIONS:
raise MQBQueryTransformationException(
f"Function '{select_field.function}' is not supported"
)
if len(select_field.parameters) == 0 or not isinstance(
select_field.parameters[0], Column
):
raise MQBQueryTransformationException(
"The first parameter of a function should be a column of the metric MRI"
)
select.append(
MetricField(
op=select_field.function,
metric_mri=select_field.parameters[0].name,
alias=select_field.alias,
)
)
else:
raise MQBQueryTransformationException(f"Unsupported select field {select_field}")
return select
def _transform_groupby(query_groupby):
mq_groupby = []
interval = None
include_series = False
for groupby_field in query_groupby:
if isinstance(groupby_field, (Column, AliasedExpression)):
if isinstance(groupby_field, AliasedExpression):
column_field = groupby_field.exp
column_alias = groupby_field.alias
else:
column_field = groupby_field
column_alias = None
if column_field.name in FIELD_ALIAS_MAPPINGS.keys() | FIELD_ALIAS_MAPPINGS.values():
mq_groupby.append(
MetricGroupByField(
field=column_field.name,
alias=column_alias,
)
)
elif column_field.name.startswith("tags["):
mq_groupby.append(
MetricGroupByField(
field=column_field.name.split("tags[")[1].split("]")[0],
alias=column_alias,
)
)
else:
raise MQBQueryTransformationException(
f"Unsupported groupby field '{column_field.name}'"
)
elif isinstance(groupby_field, Function):
if (
groupby_field.function in DERIVED_OPS
and DERIVED_OPS[groupby_field.function].can_groupby
):
mq_groupby.append(
MetricGroupByField(
field=_get_derived_op_metric_field_from_snuba_function(groupby_field),
alias=groupby_field.alias,
)
)
elif groupby_field.function == "toStartOfInterval":
# Checks against the following snuba function
# time_groupby_column = Function(
# function="toStartOfInterval",
# parameters=[
# Column(name="timestamp"),
# Function(
# function="toIntervalSecond",
# parameters=[self._metrics_query.interval],
# alias=None,
# ),
# "Universal",
# ],
# alias=TS_COL_GROUP,
# )
include_series = True
# Maps to `toIntervalSecond` function
interval_func = groupby_field.parameters[1]
assert (
isinstance(interval_func, Function)
and interval_func.function == "toIntervalSecond"
)
interval = interval_func.parameters[0]
continue
else:
raise MQBQueryTransformationException(
f"Cannot group by function {groupby_field.function}"
)
else:
raise MQBQueryTransformationException(f"Unsupported groupby field {groupby_field}")
return mq_groupby if len(mq_groupby) > 0 else None, include_series, interval
def _get_mq_dict_params_and_conditions_from(conditions):
mq_dict = {}
converted_conditions = []
for condition in conditions:
if isinstance(condition, BooleanCondition):
inner_mq_dict, inner_conditions = _get_mq_dict_params_and_conditions_from(
condition.conditions
)
mq_dict.update(inner_mq_dict)
converted_conditions.append(
BooleanCondition(op=condition.op, conditions=inner_conditions)
)
elif isinstance(condition.lhs, Column):
if condition.lhs.name == "project_id":
mq_dict["project_ids"] = condition.rhs
elif condition.lhs.name == "org_id":
mq_dict["org_id"] = condition.rhs
elif condition.lhs.name == "timestamp":
if condition.op == Op.GTE:
mq_dict["start"] = condition.rhs
elif condition.op == Op.LT:
mq_dict["end"] = condition.rhs
# In the transformer we allow all query values but the actual check will be performed by the execution
# engine of the metrics layer.
else:
converted_conditions.append(condition)
elif isinstance(condition.lhs, Function):
if condition.lhs.function in DERIVED_OPS:
if not DERIVED_OPS[condition.lhs.function].can_filter:
raise MQBQueryTransformationException(
f"Cannot filter by function {condition.lhs.function}"
)
converted_conditions.append(
MetricConditionField(
lhs=_get_derived_op_metric_field_from_snuba_function(condition.lhs),
op=condition.op,
rhs=condition.rhs,
)
)
elif condition.lhs.function in FUNCTION_ALLOWLIST:
converted_conditions.append(condition)
else:
raise MQBQueryTransformationException(
f"Unsupported function '{condition.lhs.function}' in where"
)
else:
converted_conditions.append(condition)
return mq_dict, converted_conditions
def _transform_orderby(query_orderby):
mq_orderby = []
for orderby_field in query_orderby:
orderby_exp = orderby_field.exp
# We want to use the string field only when a column with a valid field is passed. For example:
# Column(name="project_id").
if (
isinstance(orderby_exp, Column)
and orderby_exp.name in FIELD_ALIAS_MAPPINGS.keys() | FIELD_ALIAS_MAPPINGS.values()
):
metric_order_by = MetricOrderByField(
field=orderby_exp.name,
direction=orderby_field.direction,
)
else:
transformed_field = _transform_select([orderby_exp]).pop()
metric_exp = metric_object_factory(
op=transformed_field.op, metric_mri=transformed_field.metric_mri
)
try:
metric_exp.validate_can_orderby()
except DerivedMetricException as e:
raise MQBQueryTransformationException(e)
metric_order_by = MetricOrderBy(
field=transformed_field, direction=orderby_field.direction
)
mq_orderby.append(metric_order_by)
return mq_orderby if len(mq_orderby) > 0 else None
def _derive_mri_to_apply(project_ids, select, orderby):
mri_dictionary = {
"generic_metrics_distributions": TransactionMRI.DURATION.value,
"generic_metrics_sets": TransactionMRI.USER.value,
}
mri_to_apply = TransactionMRI.DURATION.value
# We first check if there is an order by field that has the team_key_transaction, otherwise
# we just use the default mri of duration.
has_order_by_team_key_transaction = False
if orderby is not None:
for orderby_field in orderby:
if isinstance(orderby_field.field, MetricField):
if orderby_field.field.op == TEAM_KEY_TRANSACTION_OP:
has_order_by_team_key_transaction = True
break
if has_order_by_team_key_transaction:
entities = set()
if len(orderby) == 1:
# If the number of clauses in the order by is equal to 1 and the order by has a team_key_transaction it
# means that it must be the only one, therefore we want to infer the MRI type of the team_key_transaction
# from one entity in the select in order to save up a query. This is just an optimization for the edge case
# in which the select has a different entity than the default entity for the team_key_transaction, which
# is the distribution, inferred from TransactionMRI.DURATION.
for select_field in select:
if select_field.op != TEAM_KEY_TRANSACTION_OP:
expr = metric_object_factory(select_field.op, select_field.metric_mri)
entity = expr.get_entity(project_ids, use_case_id=UseCaseID.TRANSACTIONS)
if isinstance(entity, str):
entities.add(entity)
else:
# If the number of clauses in the order by is more than 1 it means that together with team_key_transaction
# there are other order by conditions and by definition we want all the order by conditions to belong to
# the same entity type, therefore we want to check how many entities are there in the other order by
# conditions and if there is only one we will infer the MRI type of the team_key_transaction
# from that one entity. If, on the other hand, there are multiple entities, then we throw an error because
# an order by across multiple entities is not supported.
for orderby_field in orderby:
if isinstance(orderby_field.field, MetricField):
if orderby_field.field.op != TEAM_KEY_TRANSACTION_OP:
expr = metric_object_factory(
orderby_field.field.op, orderby_field.field.metric_mri
)
entity = expr.get_entity(project_ids, use_case_id=UseCaseID.TRANSACTIONS)
if isinstance(entity, str):
entities.add(entity)
if len(entities) > 1:
raise InvalidParams("The orderby cannot have fields with multiple entities.")
if len(entities) > 0:
# Only if entities are found in the clauses we are going to update the MRI to apply, otherwise we will just
# resort to the default one.
mri_to_apply = mri_dictionary[entities.pop()]
return mri_to_apply
def _transform_team_key_transaction_in_select(mri_to_apply, select):
if select is None:
return select
def _select_predicate(select_field):
if select_field.op == TEAM_KEY_TRANSACTION_OP:
return MetricField(
op=select_field.op,
metric_mri=mri_to_apply,
params=select_field.params,
alias=select_field.alias,
)
return select_field
return list(map(_select_predicate, select))
def _transform_team_key_transaction_in_where(mri_to_apply, where):
if where is None:
return where
def _where_predicate(where_field):
if (
isinstance(where_field, MetricConditionField)
and where_field.lhs.op == TEAM_KEY_TRANSACTION_OP
):
return MetricConditionField(
lhs=MetricField(
op=where_field.lhs.op,
metric_mri=mri_to_apply,
params=where_field.lhs.params,
alias=where_field.lhs.alias,
),
op=where_field.op,
rhs=where_field.rhs,
)
return where_field
return list(map(_where_predicate, where))
def _transform_team_key_transaction_in_groupby(mri_to_apply, groupby):
if groupby is None:
return groupby
def _groupby_predicate(groupby_field):
if (
isinstance(groupby_field.field, MetricField)
and groupby_field.field.op == TEAM_KEY_TRANSACTION_OP
):
return MetricGroupByField(
field=MetricField(
op=groupby_field.field.op,
metric_mri=mri_to_apply,
params=groupby_field.field.params,
alias=groupby_field.field.alias,
),
)
return groupby_field
return list(map(_groupby_predicate, groupby))
def _transform_team_key_transaction_in_orderby(mri_to_apply, orderby):
if orderby is None:
return orderby
def _orderby_predicate(orderby_field):
if isinstance(orderby_field.field, MetricField):
if orderby_field.field.op == TEAM_KEY_TRANSACTION_OP:
return MetricOrderByField(
field=MetricField(
op=orderby_field.field.op,
metric_mri=mri_to_apply,
params=orderby_field.field.params,
alias=orderby_field.field.alias,
),
direction=orderby_field.direction,
)
return orderby_field
return list(map(_orderby_predicate, orderby))
def _transform_team_key_transaction_fake_mri(mq_dict):
if "project_ids" not in mq_dict:
raise MQBQueryTransformationException("Missing project_id in query")
mri_to_apply = _derive_mri_to_apply(
mq_dict["project_ids"], mq_dict["select"], mq_dict["orderby"]
)
return {
"select": _transform_team_key_transaction_in_select(mri_to_apply, mq_dict["select"]),
"where": _transform_team_key_transaction_in_where(mri_to_apply, mq_dict["where"]),
"groupby": _transform_team_key_transaction_in_groupby(mri_to_apply, mq_dict["groupby"]),
"orderby": _transform_team_key_transaction_in_orderby(mri_to_apply, mq_dict["orderby"]),
}
def transform_mqb_query_to_metrics_query(
query: Query,
is_alerts_query: bool = False,
) -> DeprecatingMetricsQuery:
groupby, include_series, interval = _transform_groupby(query.groupby)
where_mq_dict, where_conditions = _get_mq_dict_params_and_conditions_from(query.where)
mq_dict = {
"select": _transform_select(query.select),
"groupby": groupby,
"limit": query.limit,
"offset": query.offset,
"include_totals": True,
"include_series": include_series,
"granularity": query.granularity if query.granularity is not None else Granularity(3600),
"orderby": _transform_orderby(query.orderby),
"interval": interval,
"is_alerts_query": is_alerts_query,
"having": query.having,
"where": where_conditions,
**where_mq_dict,
}
# This code is just an edge case specific for the team_key_transaction derived operation.
mq_dict.update(**_transform_team_key_transaction_fake_mri(mq_dict))
return DeprecatingMetricsQuery(**mq_dict)
|
MQBQueryTransformationException
|
python
|
bottlepy__bottle
|
bottle.py
|
{
"start": 151989,
"end": 155611
}
|
class ____:
""" Base class and minimal API for template adapters """
extensions = ['tpl', 'html', 'thtml', 'stpl']
settings = {} # used in prepare()
defaults = {} # used in render()
def __init__(self,
source=None,
name=None,
lookup=None,
encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup] if lookup else []
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=None):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if not lookup:
raise depr(0, 12, "Empty template lookup path.", "Configure a template lookup path.")
if os.path.isabs(name):
raise depr(0, 12, "Use of absolute path for template name.",
"Refer to templates with names or paths relative to the lookup path.")
for spath in lookup:
spath = os.path.abspath(spath) + os.sep
fname = os.path.abspath(os.path.join(spath, name))
if not fname.startswith(spath): continue
if os.path.isfile(fname): return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
""" This reads or sets the global settings stored in class.settings. """
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (args)
or directly, as keywords (kwargs).
"""
raise NotImplementedError
|
BaseTemplate
|
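BaseTemplate above expects subclasses to implement prepare() and render(). The sketch below is a minimal adapter that uses str.format as its "engine"; it assumes bottle is importable, and the class name and template string are illustrative.

# Sketch: a tiny template adapter built on BaseTemplate.
from bottle import BaseTemplate

class FormatTemplate(BaseTemplate):
    def prepare(self, **options):
        # Nothing to parse or cache for str.format; just keep the options.
        self.options = options

    def render(self, *args, **kwargs):
        variables = {}
        for dictarg in args:
            variables.update(dictarg)
        variables.update(kwargs)
        return self.source.format(**variables)

tpl = FormatTemplate(source="Hello {name}!")
print(tpl.render(name="world"))  # Hello world!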
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/operators/test_dms.py
|
{
"start": 24175,
"end": 33802
}
|
class ____:
TASK_DATA = {
"ReplicationConfigIdentifier": "test-config",
"ReplicationConfigArn": "arn:xxxxxx",
"SourceEndpointArn": "arn:aws:dms:us-east-1:123456789012:endpoint:RZZK4EZW5UANC7Y3P4E776WHBE",
"TargetEndpointArn": "arn:aws:dms:us-east-1:123456789012:endpoint:GVBUJQXJZASXWHTWCLN2WNT57E",
"ComputeConfig": {
"MaxCapacityUnits": 2,
"MinCapacityUnits": 4,
},
"ReplicationType": "full-load",
"TableMappings": json.dumps(
{
"TableMappings": [
{
"Type": "Selection",
"RuleId": 123,
"RuleName": "test-rule",
"SourceSchema": "/",
"SourceTable": "/",
}
]
}
),
"ReplicationSettings": "string",
"SupplementalSettings": "string",
"ResourceIdentifier": "string",
}
def get_replication_status(self, status: str, deprovisioned: str = "deprovisioned"):
return [
{
"Status": status,
"ReplicationArn": "XXXXXXXXXXXXXXXXXXXXXXXXX",
"ReplicationIdentifier": "test-config",
"SourceEndpointArn": "XXXXXXXXXXXXXXXXXXXXXXXXX",
"TargetEndpointArn": "XXXXXXXXXXXXXXXXXXXXXXXXX",
"ProvisionData": {"ProvisionState": deprovisioned, "ProvisionedCapacityUnits": 2},
}
]
@mock.patch.object(DmsHook, "conn")
@mock.patch.object(DmsHook, "describe_replications")
@mock.patch.object(DmsDeleteReplicationConfigOperator, "handle_delete_wait")
@mock.patch.object(DmsHook, "get_waiter")
def test_happy_path(self, mock_waiter, mock_handle, mock_describe_replications, mock_conn):
# testing all good statuses and no waiting
mock_describe_replications.return_value = self.get_replication_status(
status="stopped", deprovisioned="deprovisioned"
)
op = DmsDeleteReplicationConfigOperator(
task_id="delete_replication_config",
replication_config_arn=self.TASK_DATA["ReplicationConfigArn"],
deferrable=False,
wait_for_completion=False,
)
op.execute({})
mock_conn.delete_replication_config.assert_called_once()
mock_waiter.assert_has_calls(
[
mock.call("replication_terminal_status"),
mock.call().wait(
Filters=[{"Name": "replication-config-arn", "Values": ["arn:xxxxxx"]}],
WaiterConfig={"Delay": 60, "MaxAttempts": 60},
),
]
)
mock_handle.assert_called_once()
@mock.patch.object(DmsHook, "conn")
@mock.patch.object(DmsHook, "describe_replications")
def test_defer_not_ready(self, mock_describe, mock_conn):
mock_describe.return_value = self.get_replication_status("running")
op = DmsDeleteReplicationConfigOperator(
task_id="delete_replication_config",
replication_config_arn=self.TASK_DATA["ReplicationConfigArn"],
deferrable=True,
)
with pytest.raises(TaskDeferred) as defer:
op.execute({})
assert isinstance(defer.value.trigger, DmsReplicationTerminalStatusTrigger)
@mock.patch.object(DmsHook, "conn")
@mock.patch.object(DmsHook, "describe_replications")
@mock.patch.object(DmsHook, "get_waiter")
def test_wait_for_completion(self, mock_waiter, mock_describe_replications, mock_conn):
mock_describe_replications.return_value = self.get_replication_status(
status="failed", deprovisioned="deprovisioned"
)
op = DmsDeleteReplicationConfigOperator(
task_id="delete_replication_config",
replication_config_arn=self.TASK_DATA["ReplicationConfigArn"],
deferrable=False,
wait_for_completion=True,
)
op.execute({})
mock_waiter.assert_has_calls(
[
mock.call("replication_terminal_status"),
mock.call().wait(
Filters=[{"Name": "replication-config-arn", "Values": ["arn:xxxxxx"]}],
WaiterConfig={"Delay": 60, "MaxAttempts": 60},
),
]
)
@mock.patch.object(DmsHook, "conn")
@mock.patch.object(DmsHook, "describe_replications")
@mock.patch.object(DmsHook, "get_waiter")
def test_wait_for_completion_not_ready(self, mock_waiter, mock_describe_replications, mock_conn):
mock_describe_replications.return_value = self.get_replication_status(
status="failed", deprovisioned="xxx"
)
op = DmsDeleteReplicationConfigOperator(
task_id="delete_replication_config",
replication_config_arn=self.TASK_DATA["ReplicationConfigArn"],
deferrable=False,
wait_for_completion=True,
)
op.execute({})
mock_waiter.assert_has_calls(
[
mock.call("replication_terminal_status"),
mock.call().wait(
Filters=[{"Name": "replication-config-arn", "Values": ["arn:xxxxxx"]}],
WaiterConfig={"Delay": 60, "MaxAttempts": 60},
),
]
)
@mock.patch.object(DmsHook, "conn")
@mock.patch.object(DmsHook, "describe_replications")
@mock.patch.object(DmsDeleteReplicationConfigOperator, "handle_delete_wait")
@mock.patch.object(DmsHook, "get_waiter")
def test_not_ready_state(self, mock_waiter, mock_handle, mock_describe, mock_conn):
mock_describe.return_value = self.get_replication_status("running")
op = DmsDeleteReplicationConfigOperator(
task_id="delete_replication_config",
replication_config_arn=self.TASK_DATA["ReplicationConfigArn"],
deferrable=False,
wait_for_completion=False,
)
op.execute({})
mock_waiter.assert_has_calls(
[
mock.call("replication_terminal_status"),
mock.call().wait(
Filters=[{"Name": "replication-config-arn", "Values": ["arn:xxxxxx"]}],
WaiterConfig={"Delay": 60, "MaxAttempts": 60},
),
]
)
mock_handle.assert_called_once()
mock_conn.delete_replication_config.assert_called_once()
@mock.patch.object(DmsHook, "conn")
@mock.patch.object(DmsHook, "describe_replications")
@mock.patch.object(DmsDeleteReplicationConfigOperator, "handle_delete_wait")
@mock.patch.object(DmsHook, "get_waiter")
def test_not_deprovisioned(self, mock_waiter, mock_handle, mock_describe, mock_conn):
mock_describe.return_value = self.get_replication_status("stopped", "deprovisioning")
op = DmsDeleteReplicationConfigOperator(
task_id="delete_replication_config",
replication_config_arn=self.TASK_DATA["ReplicationConfigArn"],
deferrable=False,
wait_for_completion=False,
)
op.execute({})
mock_waiter.assert_has_calls(
[
mock.call("replication_terminal_status"),
mock.call().wait(
Filters=[{"Name": "replication-config-arn", "Values": ["arn:xxxxxx"]}],
WaiterConfig={"Delay": 60, "MaxAttempts": 60},
),
]
)
mock_handle.assert_called_once()
@mock.patch.object(DmsHook, "conn")
@mock.patch.object(DmsHook, "describe_replications")
@mock.patch.object(DmsHook, "get_waiter")
def test_config_not_found(self, mock_waiter, mock_describe, mock_conn):
mock_describe.return_value = []
op = DmsDeleteReplicationConfigOperator(
task_id="delete_replication_config",
replication_config_arn=self.TASK_DATA["ReplicationConfigArn"],
deferrable=False,
wait_for_completion=False,
)
with pytest.raises(IndexError):
op.execute({})
mock_waiter.assert_not_called()
mock_conn.delete_replication_config.assert_not_called()
@mock.patch.object(DmsHook, "conn")
@mock.patch.object(DmsHook, "describe_replications")
@mock.patch.object(DmsHook, "get_waiter")
def test_defer_not_deprovisioned(self, mock_waiter, mock_describe, mock_conn):
# not deprovisioned
mock_describe.return_value = self.get_replication_status("stopped", "deprovisioning")
op = DmsDeleteReplicationConfigOperator(
task_id="delete_replication_config",
replication_config_arn=self.TASK_DATA["ReplicationConfigArn"],
deferrable=True,
wait_for_completion=False,
)
with pytest.raises(TaskDeferred) as defer:
op.execute({})
assert isinstance(defer.value.trigger, DmsReplicationDeprovisionedTrigger)
# not in terminal status
mock_describe.return_value = self.get_replication_status("running", "deprovisioning")
op = DmsDeleteReplicationConfigOperator(
task_id="delete_replication_config",
replication_config_arn=self.TASK_DATA["ReplicationConfigArn"],
deferrable=True,
wait_for_completion=False,
)
with pytest.raises(TaskDeferred) as defer:
op.execute({})
assert isinstance(defer.value.trigger, DmsReplicationTerminalStatusTrigger)
|
TestDmsDeleteReplicationConfigOperator
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typeNarrowingTypeIs1.py
|
{
"start": 2095,
"end": 2294
}
|
class ____(str):
@classmethod
def method1(cls, v: str):
if type(v) is cls:
reveal_type(v, expected_text="G*")
else:
reveal_type(v, expected_text="str")
|
G
|
python
|
xlwings__xlwings
|
xlwings/constants.py
|
{
"start": 118191,
"end": 118309
}
|
class ____:
xlSummaryAbove = 0 # from enum XlSummaryRow
xlSummaryBelow = 1 # from enum XlSummaryRow
|
SummaryRow
|
python
|
doocs__leetcode
|
lcof2/剑指 Offer II 062. 实现前缀树/Solution.py
|
{
"start": 0,
"end": 1041
}
|
class ____:
def __init__(self):
self.children = [None] * 26
self.is_end = False
def insert(self, word: str) -> None:
node = self
for c in word:
idx = ord(c) - ord('a')
if node.children[idx] is None:
node.children[idx] = Trie()
node = node.children[idx]
node.is_end = True
def search(self, word: str) -> bool:
node = self._search_prefix(word)
return node is not None and node.is_end
def startsWith(self, prefix: str) -> bool:
node = self._search_prefix(prefix)
return node is not None
def _search_prefix(self, prefix: str):
node = self
for c in prefix:
idx = ord(c) - ord('a')
if node.children[idx] is None:
return None
node = node.children[idx]
return node
# Your Trie object will be instantiated and called as such:
# obj = Trie()
# obj.insert(word)
# param_2 = obj.search(word)
# param_3 = obj.startsWith(prefix)
|
Trie
|
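A short usage sketch for the prefix tree above, assuming the Trie class from the excerpt is defined in the current module:

# Sketch: exercising insert/search/startsWith on the Trie above.
trie = Trie()
trie.insert("apple")
assert trie.search("apple") is True
assert trie.search("app") is False      # "app" was never inserted as a word
assert trie.startsWith("app") is True   # but it is a prefix of "apple"
trie.insert("app")
assert trie.search("app") is True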
python
|
giampaolo__psutil
|
scripts/internal/print_dist.py
|
{
"start": 2064,
"end": 3756
}
|
class ____(Wheel):
def platform(self):
return "source"
def arch(self):
return "-"
def pyver(self):
return "-"
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'dir',
nargs="?",
default="dist",
help='directory containing tar.gz or wheel files',
)
args = parser.parse_args()
groups = collections.defaultdict(list)
ls = sorted(os.listdir(args.dir), key=lambda x: x.endswith("tar.gz"))
for name in ls:
path = os.path.join(args.dir, name)
if path.endswith(".whl"):
pkg = Wheel(path)
elif path.endswith(".tar.gz"):
pkg = Tarball(path)
else:
raise ValueError(f"invalid package {path!r}")
groups[pkg.platform()].append(pkg)
tot_files = 0
tot_size = 0
templ = "{:<120} {:>7} {:>8} {:>7}"
for platf, pkgs in groups.items():
ppn = f"{platf} ({len(pkgs)})"
s = templ.format(ppn, "size", "arch", "pyver")
print_color('\n' + s, color=None, bold=True)
for pkg in sorted(pkgs, key=lambda x: x.name):
tot_files += 1
tot_size += pkg.size()
s = templ.format(
" " + pkg.name,
bytes2human(pkg.size()),
pkg.arch(),
pkg.pyver(),
)
if 'pypy' in pkg.pyver():
print_color(s, color='violet')
else:
print_color(s, color='brown')
print_color(
f"\n\ntotals: files={tot_files}, size={bytes2human(tot_size)}",
bold=True,
)
if __name__ == '__main__':
main()
|
Tarball
|
python
|
ray-project__ray
|
python/ray/_private/thirdparty/pathspec/util.py
|
{
"start": 15299,
"end": 17929
}
|
class ____(object):
"""
The :class:`.TreeEntry` class contains information about a file-system
entry.
"""
#: Make the class dict-less.
__slots__ = ('_lstat', 'name', 'path', '_stat')
def __init__(self, name, path, lstat, stat):
"""
Initialize the :class:`.TreeEntry` instance.
*name* (:class:`str`) is the base name of the entry.
*path* (:class:`str`) is the relative path of the entry.
*lstat* (:class:`~os.stat_result`) is the stat result of the direct
entry.
*stat* (:class:`~os.stat_result`) is the stat result of the entry,
potentially linked.
"""
self._lstat = lstat
"""
*_lstat* (:class:`~os.stat_result`) is the stat result of the direct
entry.
"""
self.name = name
"""
*name* (:class:`str`) is the base name of the entry.
"""
self.path = path
"""
*path* (:class:`str`) is the path of the entry.
"""
self._stat = stat
"""
*_stat* (:class:`~os.stat_result`) is the stat result of the linked
entry.
"""
def is_dir(self, follow_links=None):
"""
Get whether the entry is a directory.
*follow_links* (:class:`bool` or :data:`None`) is whether to follow
symbolic links. If this is :data:`True`, a symlink to a directory
will result in :data:`True`. Default is :data:`None` for :data:`True`.
Returns whether the entry is a directory (:class:`bool`).
"""
if follow_links is None:
follow_links = True
node_stat = self._stat if follow_links else self._lstat
return stat.S_ISDIR(node_stat.st_mode)
def is_file(self, follow_links=None):
"""
Get whether the entry is a regular file.
*follow_links* (:class:`bool` or :data:`None`) is whether to follow
symbolic links. If this is :data:`True`, a symlink to a regular file
will result in :data:`True`. Default is :data:`None` for :data:`True`.
Returns whether the entry is a regular file (:class:`bool`).
"""
if follow_links is None:
follow_links = True
node_stat = self._stat if follow_links else self._lstat
return stat.S_ISREG(node_stat.st_mode)
def is_symlink(self):
"""
Returns whether the entry is a symbolic link (:class:`bool`).
"""
return stat.S_ISLNK(self._lstat.st_mode)
def stat(self, follow_links=None):
"""
Get the cached stat result for the entry.
*follow_links* (:class:`bool` or :data:`None`) is whether to follow
symbolic links. If this is :data:`True`, the stat result of the
linked file will be returned. Default is :data:`None` for :data:`True`.
Returns that stat result (:class:`~os.stat_result`).
"""
if follow_links is None:
follow_links = True
return self._stat if follow_links else self._lstat
|
TreeEntry
|
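TreeEntry above caches both lstat and stat results so callers can choose whether to follow symlinks. The standard-library sketch below shows the same distinction; it uses a temporary directory and assumes the platform permits creating symlinks (on Windows this may require elevated privileges).

# Sketch: stat vs. lstat, the distinction TreeEntry.is_dir/is_file rely on.
import os
import stat
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    target = os.path.join(tmp, "file.txt")
    link = os.path.join(tmp, "link.txt")
    with open(target, "w") as f:
        f.write("data")
    os.symlink(target, link)

    lstat_result = os.lstat(link)   # the link itself
    stat_result = os.stat(link)     # follows the link to the regular file
    assert stat.S_ISLNK(lstat_result.st_mode)
    assert stat.S_ISREG(stat_result.st_mode)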
python
|
hynek__structlog
|
src/structlog/stdlib.py
|
{
"start": 4389,
"end": 16284
}
|
class ____(BoundLoggerBase):
"""
Python Standard Library version of `structlog.BoundLogger`.
Works exactly like the generic one except that it takes advantage of
knowing the logging methods in advance.
Use it like::
structlog.configure(
wrapper_class=structlog.stdlib.BoundLogger,
)
It also contains a bunch of properties that pass-through to the wrapped
`logging.Logger` which should make it work as a drop-in replacement.
.. versionadded:: 23.1.0
Async variants `alog()`, `adebug()`, `ainfo()`, and so forth.
.. versionchanged:: 24.2.0
Callsite parameters are now also collected by
`structlog.processors.CallsiteParameterAdder` for async log methods.
"""
_logger: logging.Logger
def bind(self, **new_values: Any) -> Self:
"""
Return a new logger with *new_values* added to the existing ones.
"""
return super().bind(**new_values)
def unbind(self, *keys: str) -> Self:
"""
Return a new logger with *keys* removed from the context.
Raises:
KeyError: If the key is not part of the context.
"""
return super().unbind(*keys)
def try_unbind(self, *keys: str) -> Self:
"""
Like :meth:`unbind`, but best effort: missing keys are ignored.
.. versionadded:: 18.2.0
"""
return super().try_unbind(*keys)
def new(self, **new_values: Any) -> Self:
"""
        Clear the context and bind *new_values* using `bind`.
Only necessary with dict implementations that keep global state like
those wrapped by `structlog.threadlocal.wrap_dict` when threads
are reused.
"""
return super().new(**new_values)
def debug(self, event: str | None = None, *args: Any, **kw: Any) -> Any:
"""
Process event and call `logging.Logger.debug` with the result.
"""
return self._proxy_to_logger("debug", event, *args, **kw)
def info(self, event: str | None = None, *args: Any, **kw: Any) -> Any:
"""
Process event and call `logging.Logger.info` with the result.
"""
return self._proxy_to_logger("info", event, *args, **kw)
def warning(self, event: str | None = None, *args: Any, **kw: Any) -> Any:
"""
Process event and call `logging.Logger.warning` with the result.
"""
return self._proxy_to_logger("warning", event, *args, **kw)
warn = warning
def error(self, event: str | None = None, *args: Any, **kw: Any) -> Any:
"""
Process event and call `logging.Logger.error` with the result.
"""
return self._proxy_to_logger("error", event, *args, **kw)
def critical(self, event: str | None = None, *args: Any, **kw: Any) -> Any:
"""
Process event and call `logging.Logger.critical` with the result.
"""
return self._proxy_to_logger("critical", event, *args, **kw)
def fatal(self, event: str | None = None, *args: Any, **kw: Any) -> Any:
"""
Process event and call `logging.Logger.critical` with the result.
"""
return self._proxy_to_logger("critical", event, *args, **kw)
def exception(
self, event: str | None = None, *args: Any, **kw: Any
) -> Any:
"""
Process event and call `logging.Logger.exception` with the result,
after setting ``exc_info`` to `True` if it's not already set.
"""
kw.setdefault("exc_info", True)
return self._proxy_to_logger("exception", event, *args, **kw)
def log(
self, level: int, event: str | None = None, *args: Any, **kw: Any
) -> Any:
"""
Process *event* and call the appropriate logging method depending on
*level*.
"""
return self._proxy_to_logger(LEVEL_TO_NAME[level], event, *args, **kw)
def _proxy_to_logger(
self,
method_name: str,
event: str | None = None,
*event_args: str,
**event_kw: Any,
) -> Any:
"""
Propagate a method call to the wrapped logger.
This is the same as the superclass implementation, except that
it also preserves positional arguments in the ``event_dict`` so
that the stdlib's support for format strings can be used.
"""
if event_args:
event_kw["positional_args"] = event_args
return super()._proxy_to_logger(method_name, event=event, **event_kw)
# Pass-through attributes and methods to mimic the stdlib's logger
# interface.
@property
def name(self) -> str:
"""
Returns :attr:`logging.Logger.name`
"""
return self._logger.name
@property
def level(self) -> int:
"""
Returns :attr:`logging.Logger.level`
"""
return self._logger.level
@property
def parent(self) -> Any:
"""
Returns :attr:`logging.Logger.parent`
"""
return self._logger.parent
@property
def propagate(self) -> bool:
"""
Returns :attr:`logging.Logger.propagate`
"""
return self._logger.propagate
@property
def handlers(self) -> Any:
"""
Returns :attr:`logging.Logger.handlers`
"""
return self._logger.handlers
@property
def disabled(self) -> int:
"""
Returns :attr:`logging.Logger.disabled`
"""
return self._logger.disabled
def setLevel(self, level: int) -> None:
"""
Calls :meth:`logging.Logger.setLevel` with unmodified arguments.
"""
self._logger.setLevel(level)
def findCaller(
self, stack_info: bool = False, stacklevel: int = 1
) -> tuple[str, int, str, str | None]:
"""
Calls :meth:`logging.Logger.findCaller` with unmodified arguments.
"""
# No need for stacklevel-adjustments since we're within structlog and
# our frames are ignored unconditionally.
return self._logger.findCaller(
stack_info=stack_info, stacklevel=stacklevel
)
def makeRecord(
self,
name: str,
level: int,
fn: str,
lno: int,
msg: str,
args: tuple[Any, ...],
exc_info: ExcInfo,
func: str | None = None,
extra: Any = None,
) -> logging.LogRecord:
"""
Calls :meth:`logging.Logger.makeRecord` with unmodified arguments.
"""
return self._logger.makeRecord(
name, level, fn, lno, msg, args, exc_info, func=func, extra=extra
)
def handle(self, record: logging.LogRecord) -> None:
"""
Calls :meth:`logging.Logger.handle` with unmodified arguments.
"""
self._logger.handle(record)
def addHandler(self, hdlr: logging.Handler) -> None:
"""
Calls :meth:`logging.Logger.addHandler` with unmodified arguments.
"""
self._logger.addHandler(hdlr)
def removeHandler(self, hdlr: logging.Handler) -> None:
"""
Calls :meth:`logging.Logger.removeHandler` with unmodified arguments.
"""
self._logger.removeHandler(hdlr)
def hasHandlers(self) -> bool:
"""
Calls :meth:`logging.Logger.hasHandlers` with unmodified arguments.
Exists only in Python 3.
"""
return self._logger.hasHandlers()
def callHandlers(self, record: logging.LogRecord) -> None:
"""
Calls :meth:`logging.Logger.callHandlers` with unmodified arguments.
"""
self._logger.callHandlers(record)
def getEffectiveLevel(self) -> int:
"""
Calls :meth:`logging.Logger.getEffectiveLevel` with unmodified
arguments.
"""
return self._logger.getEffectiveLevel()
def isEnabledFor(self, level: int) -> bool:
"""
Calls :meth:`logging.Logger.isEnabledFor` with unmodified arguments.
"""
return self._logger.isEnabledFor(level)
def getChild(self, suffix: str) -> logging.Logger:
"""
Calls :meth:`logging.Logger.getChild` with unmodified arguments.
"""
return self._logger.getChild(suffix)
# Non-Standard Async
async def _dispatch_to_sync(
self,
meth: Callable[..., Any],
event: str,
args: tuple[Any, ...],
kw: dict[str, Any],
) -> None:
"""
Merge contextvars and log using the sync logger in a thread pool.
"""
scs_token = _ASYNC_CALLING_STACK.set(sys._getframe().f_back.f_back) # type: ignore[union-attr, arg-type, unused-ignore]
ctx = contextvars.copy_context()
try:
await asyncio.get_running_loop().run_in_executor(
None,
lambda: ctx.run(lambda: meth(event, *args, **kw)),
)
finally:
_ASYNC_CALLING_STACK.reset(scs_token)
async def adebug(self, event: str, *args: Any, **kw: Any) -> None:
"""
Log using `debug()`, but asynchronously in a separate thread.
.. versionadded:: 23.1.0
"""
await self._dispatch_to_sync(self.debug, event, args, kw)
async def ainfo(self, event: str, *args: Any, **kw: Any) -> None:
"""
Log using `info()`, but asynchronously in a separate thread.
.. versionadded:: 23.1.0
"""
await self._dispatch_to_sync(self.info, event, args, kw)
async def awarning(self, event: str, *args: Any, **kw: Any) -> None:
"""
Log using `warning()`, but asynchronously in a separate thread.
.. versionadded:: 23.1.0
"""
await self._dispatch_to_sync(self.warning, event, args, kw)
async def aerror(self, event: str, *args: Any, **kw: Any) -> None:
"""
Log using `error()`, but asynchronously in a separate thread.
.. versionadded:: 23.1.0
"""
await self._dispatch_to_sync(self.error, event, args, kw)
async def acritical(self, event: str, *args: Any, **kw: Any) -> None:
"""
Log using `critical()`, but asynchronously in a separate thread.
.. versionadded:: 23.1.0
"""
await self._dispatch_to_sync(self.critical, event, args, kw)
async def afatal(self, event: str, *args: Any, **kw: Any) -> None:
"""
Log using `critical()`, but asynchronously in a separate thread.
.. versionadded:: 23.1.0
"""
await self._dispatch_to_sync(self.critical, event, args, kw)
async def aexception(self, event: str, *args: Any, **kw: Any) -> None:
"""
Log using `exception()`, but asynchronously in a separate thread.
.. versionadded:: 23.1.0
"""
# To make `log.exception("foo")` work, we have to check if the user
# passed an explicit exc_info and if not, supply our own.
if kw.get("exc_info", True) is True and kw.get("exception") is None:
kw["exc_info"] = sys.exc_info()
await self._dispatch_to_sync(self.exception, event, args, kw)
async def alog(
self, level: Any, event: str, *args: Any, **kw: Any
) -> None:
"""
Log using `log()`, but asynchronously in a separate thread.
.. versionadded:: 23.1.0
"""
await self._dispatch_to_sync(partial(self.log, level), event, args, kw)
def get_logger(*args: Any, **initial_values: Any) -> BoundLogger:
"""
Only calls `structlog.get_logger`, but has the correct type hints.
.. warning::
Does **not** check whether -- or ensure that -- you've configured
*structlog* for standard library :mod:`logging`!
See :doc:`standard-library` for details.
.. versionadded:: 20.2.0
"""
return _config.get_logger(*args, **initial_values)
|
BoundLogger
|
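A minimal sketch of wiring the stdlib-flavoured `BoundLogger` above into a logging setup; the processor list here is illustrative rather than the only sensible configuration:

import logging
import structlog

logging.basicConfig(level=logging.INFO)  # plain stdlib handler

structlog.configure(
    wrapper_class=structlog.stdlib.BoundLogger,
    logger_factory=structlog.stdlib.LoggerFactory(),
    processors=[
        structlog.stdlib.add_log_level,          # mirror the stdlib level name
        structlog.processors.TimeStamper(fmt="iso"),
        structlog.processors.JSONRenderer(),     # final rendering step
    ],
)

log = structlog.get_logger("demo").bind(request_id="abc123")
log.info("user_logged_in", user="alice")   # proxied to logging.Logger.info

Because the wrapper proxies to a real `logging.Logger`, the pass-through properties (`name`, `level`, `handlers`, ...) and the async `a*` variants shown above all operate on that same underlying logger.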
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-commcare/source_commcare/source.py
|
{
"start": 3057,
"end": 4609
}
|
class ____(CommcareStream, IncrementalMixin):
cursor_field = "indexed_on"
_cursor_value = None
@property
def state(self) -> Mapping[str, Any]:
if self._cursor_value:
return {self.cursor_field: self._cursor_value}
@state.setter
def state(self, value: Mapping[str, Any]):
self._cursor_value = datetime.strptime(value[self.cursor_field], self.dateformat)
@property
def sync_mode(self):
return SyncMode.incremental
@property
def supported_sync_modes(self):
return [SyncMode.incremental]
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
try:
# The server returns status 500 when there are no more rows, so
# raise_for_status() will raise and we fall through to return None below.
response.raise_for_status()
meta = response.json()["meta"]
if meta["next"]:
return parse_qs(meta["next"][1:])
return None
except Exception:
return None
def request_params(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None
) -> MutableMapping[str, Any]:
params = {"format": "json"}
if next_page_token:
params.update(next_page_token)
return params
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
for o in iter(response.json()["objects"]):
yield o
return None
|
IncrementalStream
|
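The pagination above works by handing the query string from the API's `meta.next` field straight back as request parameters via `parse_qs`. A standalone sketch of that round trip (the sample `meta` payload below is invented):

from urllib.parse import parse_qs, urlencode

meta = {"next": "?limit=20&offset=40&indexed_on_start=2023-01-01T00:00:00"}

# Drop the leading "?" and parse into {key: [values]}, as next_page_token does.
next_page_token = parse_qs(meta["next"][1:])

params = {"format": "json"}
params.update(next_page_token)

# doseq=True expands the list values that parse_qs produces.
print(urlencode(params, doseq=True))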
python
|
oauthlib__oauthlib
|
oauthlib/oauth2/rfc6749/clients/mobile_application.py
|
{
"start": 280,
"end": 8874
}
|
class ____(Client):
"""A public client utilizing the implicit code grant workflow.
A user-agent-based application is a public client in which the
client code is downloaded from a web server and executes within a
user-agent (e.g. web browser) on the device used by the resource
owner. Protocol data and credentials are easily accessible (and
often visible) to the resource owner. Since such applications
reside within the user-agent, they can make seamless use of the
user-agent capabilities when requesting authorization.
The implicit grant type is used to obtain access tokens (it does not
support the issuance of refresh tokens) and is optimized for public
clients known to operate a particular redirection URI. These clients
are typically implemented in a browser using a scripting language
such as JavaScript.
As a redirection-based flow, the client must be capable of
interacting with the resource owner's user-agent (typically a web
browser) and capable of receiving incoming requests (via redirection)
from the authorization server.
Unlike the authorization code grant type in which the client makes
separate requests for authorization and access token, the client
receives the access token as the result of the authorization request.
The implicit grant type does not include client authentication, and
relies on the presence of the resource owner and the registration of
the redirection URI. Because the access token is encoded into the
redirection URI, it may be exposed to the resource owner and other
applications residing on the same device.
"""
response_type = 'token'
def prepare_request_uri(self, uri, redirect_uri=None, scope=None,
state=None, **kwargs):
"""Prepare the implicit grant request URI.
The client constructs the request URI by adding the following
parameters to the query component of the authorization endpoint URI
using the "application/x-www-form-urlencoded" format, per `Appendix B`_:
:param redirect_uri: OPTIONAL. The redirect URI must be an absolute URI
and it should have been registered with the OAuth
provider prior to use. As described in `Section 3.1.2`_.
:param scope: OPTIONAL. The scope of the access request as described by
Section 3.3`_. These may be any string but are commonly
URIs or various categories such as ``videos`` or ``documents``.
:param state: RECOMMENDED. An opaque value used by the client to maintain
state between the request and callback. The authorization
server includes this value when redirecting the user-agent back
to the client. The parameter SHOULD be used for preventing
cross-site request forgery as described in `Section 10.12`_.
:param kwargs: Extra arguments to include in the request URI.
In addition to supplied parameters, OAuthLib will append the ``client_id``
that was provided in the constructor as well as the mandatory ``response_type``
argument, set to ``token``::
>>> from oauthlib.oauth2 import MobileApplicationClient
>>> client = MobileApplicationClient('your_id')
>>> client.prepare_request_uri('https://example.com')
'https://example.com?client_id=your_id&response_type=token'
>>> client.prepare_request_uri('https://example.com', redirect_uri='https://a.b/callback')
'https://example.com?client_id=your_id&response_type=token&redirect_uri=https%3A%2F%2Fa.b%2Fcallback'
>>> client.prepare_request_uri('https://example.com', scope=['profile', 'pictures'])
'https://example.com?client_id=your_id&response_type=token&scope=profile+pictures'
>>> client.prepare_request_uri('https://example.com', foo='bar')
'https://example.com?client_id=your_id&response_type=token&foo=bar'
.. _`Appendix B`: https://tools.ietf.org/html/rfc6749#appendix-B
.. _`Section 2.2`: https://tools.ietf.org/html/rfc6749#section-2.2
.. _`Section 3.1.2`: https://tools.ietf.org/html/rfc6749#section-3.1.2
.. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
.. _`Section 10.12`: https://tools.ietf.org/html/rfc6749#section-10.12
"""
scope = self.scope if scope is None else scope
return prepare_grant_uri(uri, self.client_id, self.response_type,
redirect_uri=redirect_uri, state=state, scope=scope, **kwargs)
def parse_request_uri_response(self, uri, state=None, scope=None):
"""Parse the response URI fragment.
If the resource owner grants the access request, the authorization
server issues an access token and delivers it to the client by adding
the following parameters to the fragment component of the redirection
URI using the "application/x-www-form-urlencoded" format:
:param uri: The callback URI that resulted from the user being redirected
back from the provider to you, the client.
:param state: The state provided in the authorization request.
:param scope: The scopes provided in the authorization request.
:return: Dictionary of token parameters.
:raises: OAuth2Error if response is invalid.
A successful response should always contain
**access_token**
The access token issued by the authorization server. Often
a random string.
**token_type**
The type of the token issued as described in `Section 7.1`_.
Commonly ``Bearer``.
**state**
If you provided the state parameter in the authorization phase, then
the provider is required to include that exact state value in the
response.
While it is not mandated, it is recommended that the provider include
**expires_in**
The lifetime in seconds of the access token. For
example, the value "3600" denotes that the access token will
expire in one hour from the time the response was generated.
If omitted, the authorization server SHOULD provide the
expiration time via other means or document the default value.
**scope**
Providers may supply this in all responses but are only required
to do so if it has changed since the authorization request.
A few example responses can be seen below::
>>> response_uri = 'https://example.com/callback#access_token=sdlfkj452&state=ss345asyht&token_type=Bearer&scope=hello+world'
>>> from oauthlib.oauth2 import MobileApplicationClient
>>> client = MobileApplicationClient('your_id')
>>> client.parse_request_uri_response(response_uri)
{
'access_token': 'sdlfkj452',
'token_type': 'Bearer',
'state': 'ss345asyht',
'scope': [u'hello', u'world']
}
>>> client.parse_request_uri_response(response_uri, state='other')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "oauthlib/oauth2/rfc6749/__init__.py", line 598, in parse_request_uri_response
**scope**
File "oauthlib/oauth2/rfc6749/parameters.py", line 197, in parse_implicit_response
raise ValueError("Mismatching or missing state in params.")
ValueError: Mismatching or missing state in params.
>>> def alert_scope_changed(message, old, new):
... print(message, old, new)
...
>>> oauthlib.signals.scope_changed.connect(alert_scope_changed)
>>> client.parse_request_body_response(response_body, scope=['other'])
('Scope has changed from "other" to "hello world".', ['other'], ['hello', 'world'])
.. _`Section 7.1`: https://tools.ietf.org/html/rfc6749#section-7.1
.. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
"""
scope = self.scope if scope is None else scope
self.token = parse_implicit_response(uri, state=state, scope=scope)
self.populate_token_attributes(self.token)
return self.token
|
MobileApplicationClient
|
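A compact sketch of the implicit-grant round trip with the client above; the endpoint URLs, token value and state are placeholders:

from oauthlib.oauth2 import MobileApplicationClient

client = MobileApplicationClient("your_id")

# Step 1: send the resource owner to the provider's authorization endpoint.
auth_url = client.prepare_request_uri(
    "https://provider.example/authorize",
    redirect_uri="https://app.example/callback",
    scope=["profile"],
    state="xyz",
)

# Step 2: the provider redirects back with the token in the URI fragment.
callback = (
    "https://app.example/callback#access_token=tok123"
    "&token_type=Bearer&state=xyz&scope=profile"
)
token = client.parse_request_uri_response(callback, state="xyz", scope=["profile"])
print(token["access_token"])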
python
|
jazzband__django-simple-history
|
simple_history/tests/tests/test_models.py
|
{
"start": 62390,
"end": 63320
}
|
class ____(TestCase):
def setUp(self):
self.manager1 = Employee.objects.create()
self.manager2 = Employee.objects.create()
self.employee = Employee.objects.create(manager=self.manager1)
self.employee.manager = self.manager2
self.employee.save()
self.manager1_id = self.manager1.id
self.manager1.delete()
def test_history_is_complete(self):
historical_manager_ids = list(
self.employee.history.order_by("pk").values_list("manager_id", flat=True)
)
self.assertEqual(historical_manager_ids, [self.manager1_id, self.manager2.id])
def test_restore_employee(self):
historical = self.employee.history.order_by("pk")[0]
original = historical.instance
self.assertEqual(original.manager_id, self.manager1_id)
with self.assertRaises(Employee.DoesNotExist):
original.manager
|
TestMissingOneToOne
|
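The test above relies on an `Employee` model with history tracking and a nullable self-referencing one-to-one `manager` field; the project defines it in its test models, but a rough equivalent looks like this sketch (field options are inferred from the test's behaviour, not copied from the real models module):

from django.db import models
from simple_history.models import HistoricalRecords

class Employee(models.Model):
    # Nullable so an employee can exist before a manager is assigned, and so
    # the old manager row can be deleted after being replaced.
    manager = models.OneToOneField("self", null=True, on_delete=models.CASCADE)
    history = HistoricalRecords()

Deleting `manager1` leaves its id behind in the historical rows, which is why `historical.instance.manager` raises `Employee.DoesNotExist` while `manager_id` still matches.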
python
|
PrefectHQ__prefect
|
src/prefect/server/orchestration/core_policy.py
|
{
"start": 34907,
"end": 38450
}
|
class ____(FlowRunOrchestrationRule):
"""
Rejects failed states and schedules a retry if the retry limit has not been reached.
This rule rejects transitions into a failed state if `retries` has been
set and the run count has not reached the specified limit. The client will be
instructed to transition into a scheduled state to retry flow execution.
"""
FROM_STATES = {StateType.RUNNING}
TO_STATES = {StateType.FAILED}
async def before_transition(
self,
initial_state: states.State[Any] | None,
proposed_state: states.State[Any] | None,
context: OrchestrationContext[orm_models.FlowRun, core.FlowRunPolicy],
) -> None:
if initial_state is None or proposed_state is None:
return
run_settings = context.run_settings
run_count = context.run.run_count
if run_settings.retries is None or run_count > run_settings.retries:
# Clear retry type to allow for future infrastructure level retries (e.g. via the UI)
updated_policy = context.run.empirical_policy.model_dump()
updated_policy["retry_type"] = None
context.run.empirical_policy = core.FlowRunPolicy(**updated_policy)
return # Retry count exceeded, allow transition to failed
scheduled_start_time = now("UTC") + datetime.timedelta(
seconds=run_settings.retry_delay or 0
)
# support old-style flow run retries for older clients
# older flow retries require us to loop over failed tasks to update their state
# this is not required after API version 0.8.3
api_version = context.parameters.get("api-version", None)
if api_version and api_version < Version("0.8.3"):
failed_task_runs = await models.task_runs.read_task_runs(
context.session,
flow_run_filter=filters.FlowRunFilter(
id=filters.FlowRunFilterId(any_=[context.run.id])
),
task_run_filter=filters.TaskRunFilter(
state=filters.TaskRunFilterState(
type=filters.TaskRunFilterStateType(any_=[StateType.FAILED])
)
),
)
for run in failed_task_runs:
await models.task_runs.set_task_run_state(
context.session,
run.id,
state=states.AwaitingRetry(scheduled_time=scheduled_start_time),
force=True,
)
# Reset the run count so that the task run retries still work correctly
run.run_count = 0
# Reset pause metadata on retry
# Pauses as a concept only exist after API version 0.8.4
api_version = context.parameters.get("api-version", None)
if api_version is None or api_version >= Version("0.8.4"):
updated_policy = context.run.empirical_policy.model_dump()
updated_policy["resuming"] = False
updated_policy["pause_keys"] = set()
updated_policy["retry_type"] = "in_process"
context.run.empirical_policy = core.FlowRunPolicy(**updated_policy)
# Generate a new state for the flow
retry_state = states.AwaitingRetry(
scheduled_time=scheduled_start_time,
message=proposed_state.message,
data=proposed_state.data,
)
await self.reject_transition(state=retry_state, reason="Retrying")
|
RetryFailedFlows
|
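The heart of the rule above is a small calculation: if the retry budget is not exhausted, reject the FAILED state and schedule the next attempt `retry_delay` seconds in the future. A stripped-down sketch of just that decision (names here are illustrative, not Prefect's API):

from datetime import datetime, timedelta, timezone
from typing import Optional

def next_retry_time(
    run_count: int, retries: Optional[int], retry_delay: Optional[float]
) -> Optional[datetime]:
    # No retries configured, or the budget is spent: let FAILED stand.
    if retries is None or run_count > retries:
        return None
    # Otherwise the next attempt is scheduled retry_delay seconds from now.
    return datetime.now(timezone.utc) + timedelta(seconds=retry_delay or 0)

print(next_retry_time(run_count=1, retries=3, retry_delay=10))   # a datetime
print(next_retry_time(run_count=4, retries=3, retry_delay=10))   # None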
python
|
dask__distributed
|
distributed/scheduler.py
|
{
"start": 335969,
"end": 336882
}
|
class ____(Exception):
def __init__(self, task: Key, last_worker: WorkerState, allowed_failures: int):
super().__init__(task, last_worker, allowed_failures)
@property
def task(self) -> Key:
return self.args[0]
@property
def last_worker(self) -> WorkerState:
return self.args[1]
@property
def allowed_failures(self) -> int:
return self.args[2]
def __str__(self) -> str:
return (
f"Attempted to run task {self.task!r} on {self.allowed_failures + 1} "
"different workers, but all those workers died while running it. "
f"The last worker that attempt to run the task was {self.last_worker.address}. "
"Inspecting worker logs is often a good next step to diagnose what went wrong. "
"For more information see https://distributed.dask.org/en/stable/killed.html."
)
|
KilledWorker
|
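`KilledWorker` keeps all of its state in `Exception.args` and re-exposes the pieces as properties, which keeps the exception trivially picklable for transport between processes. The same pattern in miniature (the `FetchFailed` example is made up):

import pickle

class FetchFailed(Exception):
    def __init__(self, url: str, attempts: int):
        # Passing the values to Exception.__init__ stores them in self.args,
        # which is what pickle uses to rebuild the exception.
        super().__init__(url, attempts)

    @property
    def url(self) -> str:
        return self.args[0]

    @property
    def attempts(self) -> int:
        return self.args[1]

    def __str__(self) -> str:
        return f"Gave up on {self.url!r} after {self.attempts} attempts."

err = pickle.loads(pickle.dumps(FetchFailed("https://example.com", 3)))
print(err.url, err.attempts)
print(err)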
python
|
langchain-ai__langchain
|
libs/langchain/langchain_classic/evaluation/string_distance/base.py
|
{
"start": 1668,
"end": 4816
}
|
class ____(Chain):
"""Shared methods for the rapidfuzz string distance evaluators."""
distance: StringDistance = Field(default=StringDistance.JARO_WINKLER)
normalize_score: bool = Field(default=True)
"""Whether to normalize the score to a value between `0` and `1`.
Applies only to the Levenshtein and Damerau-Levenshtein distances."""
@pre_init
def validate_dependencies(cls, values: dict[str, Any]) -> dict[str, Any]:
"""Validate that the rapidfuzz library is installed.
Args:
values: The input values.
Returns:
The validated values.
"""
_load_rapidfuzz()
return values
@property
def output_keys(self) -> list[str]:
"""Get the output keys.
Returns:
The output keys.
"""
return ["score"]
def _prepare_output(self, result: dict[str, Any]) -> dict[str, Any]:
"""Prepare the output dictionary.
Args:
result: The evaluation results.
Returns:
The prepared output dictionary.
"""
result = {"score": result["score"]}
if RUN_KEY in result:
result[RUN_KEY] = result[RUN_KEY].dict()
return result
@staticmethod
def _get_metric(distance: str, *, normalize_score: bool = False) -> Callable:
"""Get the distance metric function based on the distance type.
Args:
distance: The distance type.
normalize_score: Whether to normalize the score.
Returns:
The distance metric function.
Raises:
ValueError: If the distance metric is invalid.
"""
from rapidfuzz import distance as rf_distance
module_map: dict[str, Any] = {
StringDistance.DAMERAU_LEVENSHTEIN: rf_distance.DamerauLevenshtein,
StringDistance.LEVENSHTEIN: rf_distance.Levenshtein,
StringDistance.JARO: rf_distance.Jaro,
StringDistance.JARO_WINKLER: rf_distance.JaroWinkler,
StringDistance.HAMMING: rf_distance.Hamming,
StringDistance.INDEL: rf_distance.Indel,
}
if distance not in module_map:
msg = (
f"Invalid distance metric: {distance}"
f"\nMust be one of: {list(StringDistance)}"
)
raise ValueError(msg)
module = module_map[distance]
if normalize_score:
return module.normalized_distance
return module.distance
@property
def metric(self) -> Callable:
"""Get the distance metric function.
Returns:
The distance metric function.
"""
return _RapidFuzzChainMixin._get_metric(
self.distance,
normalize_score=self.normalize_score,
)
def compute_metric(self, a: str, b: str) -> float:
"""Compute the distance between two strings.
Args:
a: The first string.
b: The second string.
Returns:
The distance between the two strings.
"""
return self.metric(a, b)
|
_RapidFuzzChainMixin
|
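Stripped of the chain machinery, the metric lookup above just picks a module from `rapidfuzz.distance` and then chooses between its `distance` and `normalized_distance` functions. For example (requires the `rapidfuzz` package):

from rapidfuzz import distance as rf_distance

a, b = "kitten", "sitting"

# Raw edit distance: number of insertions, deletions and substitutions.
print(rf_distance.Levenshtein.distance(a, b))             # 3

# Scaled to [0, 1], which is what normalize_score=True selects.
print(rf_distance.Levenshtein.normalized_distance(a, b))  # ~0.43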
python
|
plotly__plotly.py
|
plotly/graph_objs/_contourcarpet.py
|
{
"start": 215,
"end": 60525
}
|
class ____(_BaseTraceType):
_parent_path_str = ""
_path_str = "contourcarpet"
_valid_props = {
"a",
"a0",
"asrc",
"atype",
"autocolorscale",
"autocontour",
"b",
"b0",
"bsrc",
"btype",
"carpet",
"coloraxis",
"colorbar",
"colorscale",
"contours",
"customdata",
"customdatasrc",
"da",
"db",
"fillcolor",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"legend",
"legendgroup",
"legendgrouptitle",
"legendrank",
"legendwidth",
"line",
"meta",
"metasrc",
"name",
"ncontours",
"opacity",
"reversescale",
"showlegend",
"showscale",
"stream",
"text",
"textsrc",
"transpose",
"type",
"uid",
"uirevision",
"visible",
"xaxis",
"yaxis",
"z",
"zauto",
"zmax",
"zmid",
"zmin",
"zorder",
"zsrc",
}
@property
def a(self):
"""
Sets the x coordinates.
The 'a' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["a"]
@a.setter
def a(self, val):
self["a"] = val
@property
def a0(self):
"""
Alternate to `x`. Builds a linear space of x coordinates. Use
with `dx` where `x0` is the starting coordinate and `dx` the
step.
The 'a0' property accepts values of any type
Returns
-------
Any
"""
return self["a0"]
@a0.setter
def a0(self, val):
self["a0"] = val
@property
def asrc(self):
"""
Sets the source reference on Chart Studio Cloud for `a`.
The 'asrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["asrc"]
@asrc.setter
def asrc(self, val):
self["asrc"] = val
@property
def atype(self):
"""
If "array", the heatmap's x coordinates are given by "x" (the
default behavior when `x` is provided). If "scaled", the
heatmap's x coordinates are given by "x0" and "dx" (the default
behavior when `x` is not provided).
The 'atype' property is an enumeration that may be specified as:
- One of the following enumeration values:
['array', 'scaled']
Returns
-------
Any
"""
return self["atype"]
@atype.setter
def atype(self, val):
self["atype"] = val
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be chosen
according to whether numbers in the `color` array are all
positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
@property
def autocontour(self):
"""
Determines whether or not the contour level attributes are
picked by an algorithm. If True, the number of contour levels
can be set in `ncontours`. If False, set the contour level
attributes in `contours`.
The 'autocontour' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocontour"]
@autocontour.setter
def autocontour(self, val):
self["autocontour"] = val
@property
def b(self):
"""
Sets the y coordinates.
The 'b' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["b"]
@b.setter
def b(self, val):
self["b"] = val
@property
def b0(self):
"""
Alternate to `y`. Builds a linear space of y coordinates. Use
with `dy` where `y0` is the starting coordinate and `dy` the
step.
The 'b0' property accepts values of any type
Returns
-------
Any
"""
return self["b0"]
@b0.setter
def b0(self, val):
self["b0"] = val
@property
def bsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `b`.
The 'bsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bsrc"]
@bsrc.setter
def bsrc(self, val):
self["bsrc"] = val
@property
def btype(self):
"""
If "array", the heatmap's y coordinates are given by "y" (the
default behavior when `y` is provided) If "scaled", the
heatmap's y coordinates are given by "y0" and "dy" (the default
behavior when `y` is not provided)
The 'btype' property is an enumeration that may be specified as:
- One of the following enumeration values:
['array', 'scaled']
Returns
-------
Any
"""
return self["btype"]
@btype.setter
def btype(self, val):
self["btype"] = val
@property
def carpet(self):
"""
The `carpet` of the carpet axes on which this contour trace
lies
The 'carpet' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["carpet"]
@carpet.setter
def carpet(self, val):
self["carpet"] = val
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of :class:`plotly.graph_objs.contourcarpet.ColorBar`
- A dict of string/value properties that will be passed
to the ColorBar constructor
Returns
-------
plotly.graph_objs.contourcarpet.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
@property
def colorscale(self):
"""
Sets the colorscale. The colorscale must be an array containing
arrays mapping a normalized value to an rgb, rgba, hex, hsl,
hsv, or named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the
bounds of the colorscale in color space, use `zmin` and `zmax`.
Alternatively, `colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,Electric,
Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,RdBu,Reds,Viridis,
YlGnBu,YlOrRd.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
@property
def contours(self):
"""
The 'contours' property is an instance of Contours
that may be specified as:
- An instance of :class:`plotly.graph_objs.contourcarpet.Contours`
- A dict of string/value properties that will be passed
to the Contours constructor
Returns
-------
plotly.graph_objs.contourcarpet.Contours
"""
return self["contours"]
@contours.setter
def contours(self, val):
self["contours"] = val
@property
def customdata(self):
"""
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note that
"scatter" traces also append customdata items in the markers
DOM elements
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`customdata`.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
@property
def da(self):
"""
Sets the x coordinate step. See `x0` for more info.
The 'da' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["da"]
@da.setter
def da(self, val):
self["da"] = val
@property
def db(self):
"""
Sets the y coordinate step. See `y0` for more info.
The 'db' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["db"]
@db.setter
def db(self, val):
self["db"] = val
@property
def fillcolor(self):
"""
Sets the fill color if `contours.type` is "constraint".
Defaults to a half-transparent variant of the line color,
marker color, or marker line color, whichever is available.
The 'fillcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A number that will be interpreted as a color
according to contourcarpet.colorscale
Returns
-------
str
"""
return self["fillcolor"]
@fillcolor.setter
def fillcolor(self, val):
self["fillcolor"] = val
@property
def hovertext(self):
"""
Same as `text`.
The 'hovertext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertext`.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
@property
def ids(self):
"""
Assigns id labels to each datum. These ids are used for object constancy
of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ids`.
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
@property
def legend(self):
"""
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2", "legend3",
etc. Settings for these legends are set in the layout, under
`layout.legend`, `layout.legend2`, etc.
The 'legend' property is an identifier of a particular
subplot, of type 'legend', that may be specified as the string 'legend'
optionally followed by an integer >= 1
(e.g. 'legend', 'legend1', 'legend2', 'legend3', etc.)
Returns
-------
str
"""
return self["legend"]
@legend.setter
def legend(self, val):
self["legend"] = val
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces and shapes part of
the same legend group hide/show at the same time when toggling
legend items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.contourcarpet.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Returns
-------
plotly.graph_objs.contourcarpet.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
@property
def legendrank(self):
"""
Sets the legend rank for this trace. Items and groups with
smaller ranks are presented on top/left side while with
"reversed" `legend.traceorder` they are on bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items. When
having unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and layout.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
@property
def legendwidth(self):
"""
Sets the width (in px or fraction) of the legend for this
trace.
The 'legendwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["legendwidth"]
@legendwidth.setter
def legendwidth(self, val):
self["legendwidth"] = val
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.contourcarpet.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Returns
-------
plotly.graph_objs.contourcarpet.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
`text`, `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n].meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for `meta`.
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
@property
def name(self):
"""
Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def ncontours(self):
"""
Sets the maximum number of contour levels. The actual number of
contours will be chosen automatically to be less than or equal
to the value of `ncontours`. Has an effect only if
`autocontour` is True or if `contours.size` is missing.
The 'ncontours' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 9223372036854775807]
Returns
-------
int
"""
return self["ncontours"]
@ncontours.setter
def ncontours(self, val):
self["ncontours"] = val
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def reversescale(self):
"""
Reverses the color mapping if true. If true, `zmin` will
correspond to the last color in the array and `zmax` will
correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.contourcarpet.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Returns
-------
plotly.graph_objs.contourcarpet.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
@property
def text(self):
"""
Sets the text elements associated with each z value.
The 'text' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def textsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `text`.
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textsrc"]
@textsrc.setter
def textsrc(self, val):
self["textsrc"] = val
@property
def transpose(self):
"""
Transposes the z data.
The 'transpose' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["transpose"]
@transpose.setter
def transpose(self, val):
self["transpose"] = val
@property
def uid(self):
"""
Assign an id to this trace. Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def xaxis(self):
"""
Sets a reference between this trace's x coordinates and a 2D
cartesian x axis. If "x" (the default value), the x coordinates
refer to `layout.xaxis`. If "x2", the x coordinates refer to
`layout.xaxis2`, and so on.
The 'xaxis' property is an identifier of a particular
subplot, of type 'x', that may be specified as the string 'x'
optionally followed by an integer >= 1
(e.g. 'x', 'x1', 'x2', 'x3', etc.)
Returns
-------
str
"""
return self["xaxis"]
@xaxis.setter
def xaxis(self, val):
self["xaxis"] = val
@property
def yaxis(self):
"""
Sets a reference between this trace's y coordinates and a 2D
cartesian y axis. If "y" (the default value), the y coordinates
refer to `layout.yaxis`. If "y2", the y coordinates refer to
`layout.yaxis2`, and so on.
The 'yaxis' property is an identifier of a particular
subplot, of type 'y', that may be specified as the string 'y'
optionally followed by an integer >= 1
(e.g. 'y', 'y1', 'y2', 'y3', etc.)
Returns
-------
str
"""
return self["yaxis"]
@yaxis.setter
def yaxis(self, val):
self["yaxis"] = val
@property
def z(self):
"""
Sets the z data.
The 'z' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
@property
def zauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `z`) or the bounds set in
`zmin` and `zmax` Defaults to `false` when `zmin` and `zmax`
are set by the user.
The 'zauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["zauto"]
@zauto.setter
def zauto(self, val):
self["zauto"] = val
@property
def zmax(self):
"""
Sets the upper bound of the color domain. Value should have the
same units as in `z` and if set, `zmin` must be set as well.
The 'zmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zmax"]
@zmax.setter
def zmax(self, val):
self["zmax"] = val
@property
def zmid(self):
"""
Sets the mid-point of the color domain by scaling `zmin` and/or
`zmax` to be equidistant to this point. Value should have the
same units as in `z`. Has no effect when `zauto` is `false`.
The 'zmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zmid"]
@zmid.setter
def zmid(self, val):
self["zmid"] = val
@property
def zmin(self):
"""
Sets the lower bound of the color domain. Value should have the
same units as in `z` and if set, `zmax` must be set as well.
The 'zmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zmin"]
@zmin.setter
def zmin(self, val):
self["zmin"] = val
@property
def zorder(self):
"""
Sets the layer on which this trace is displayed, relative to
other SVG traces on the same subplot. SVG traces with higher
`zorder` appear in front of those with lower `zorder`.
The 'zorder' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
Returns
-------
int
"""
return self["zorder"]
@zorder.setter
def zorder(self, val):
self["zorder"] = val
@property
def zsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `z`.
The 'zsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["zsrc"]
@zsrc.setter
def zsrc(self, val):
self["zsrc"] = val
@property
def type(self):
return self._props["type"]
@property
def _prop_descriptions(self):
return """\
a
Sets the x coordinates.
a0
Alternate to `x`. Builds a linear space of x
coordinates. Use with `dx` where `x0` is the starting
coordinate and `dx` the step.
asrc
Sets the source reference on Chart Studio Cloud for
`a`.
atype
If "array", the heatmap's x coordinates are given by
"x" (the default behavior when `x` is provided). If
"scaled", the heatmap's x coordinates are given by "x0"
and "dx" (the default behavior when `x` is not
provided).
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color`
array are all positive, all negative or mixed.
autocontour
Determines whether or not the contour level attributes
are picked by an algorithm. If True, the number of
contour levels can be set in `ncontours`. If False, set
the contour level attributes in `contours`.
b
Sets the y coordinates.
b0
Alternate to `y`. Builds a linear space of y
coordinates. Use with `dy` where `y0` is the starting
coordinate and `dy` the step.
bsrc
Sets the source reference on Chart Studio Cloud for
`b`.
btype
If "array", the heatmap's y coordinates are given by
"y" (the default behavior when `y` is provided) If
"scaled", the heatmap's y coordinates are given by "y0"
and "dy" (the default behavior when `y` is not
provided)
carpet
The `carpet` of the carpet axes on which this contour
trace lies
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.contourcarpet.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use `zmin` and `zmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Blackbody,Bluered,Blues,C
ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
contours
:class:`plotly.graph_objects.contourcarpet.Contours`
instance or dict with compatible properties
customdata
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note
that "scatter" traces also append customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
da
Sets the x coordinate step. See `x0` for more info.
db
Sets the y coordinate step. See `y0` for more info.
fillcolor
Sets the fill color if `contours.type` is "constraint".
Defaults to a half-transparent variant of the line
color, marker color, or marker line color, whichever is
available.
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids are used for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this trace. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.contourcarpet.Legendgroupt
itle` instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
line
:class:`plotly.graph_objects.contourcarpet.Line`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`,
`rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
ncontours
Sets the maximum number of contour levels. The actual
number of contours will be chosen automatically to be
less than or equal to the value of `ncontours`. Has an
effect only if `autocontour` is True or if
`contours.size` is missing.
opacity
Sets the opacity of the trace.
reversescale
Reverses the color mapping if true. If true, `zmin`
will correspond to the last color in the array and
`zmax` will correspond to the first color.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showscale
Determines whether or not a colorbar is displayed for
this trace.
stream
:class:`plotly.graph_objects.contourcarpet.Stream`
instance or dict with compatible properties
text
Sets the text elements associated with each z value.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
transpose
Transposes the z data.
uid
Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
z
Sets the z data.
zauto
Determines whether or not the color domain is computed
with respect to the input data (here in `z`) or the
bounds set in `zmin` and `zmax` Defaults to `false`
when `zmin` and `zmax` are set by the user.
zmax
Sets the upper bound of the color domain. Value should
have the same units as in `z` and if set, `zmin` must
be set as well.
zmid
Sets the mid-point of the color domain by scaling
`zmin` and/or `zmax` to be equidistant to this point.
Value should have the same units as in `z`. Has no
effect when `zauto` is `false`.
zmin
Sets the lower bound of the color domain. Value should
have the same units as in `z` and if set, `zmax` must
be set as well.
zorder
Sets the layer on which this trace is displayed,
relative to other SVG traces on the same subplot. SVG
traces with higher `zorder` appear in front of those
with lower `zorder`.
zsrc
Sets the source reference on Chart Studio Cloud for
`z`.
"""
def __init__(
self,
arg=None,
a=None,
a0=None,
asrc=None,
atype=None,
autocolorscale=None,
autocontour=None,
b=None,
b0=None,
bsrc=None,
btype=None,
carpet=None,
coloraxis=None,
colorbar=None,
colorscale=None,
contours=None,
customdata=None,
customdatasrc=None,
da=None,
db=None,
fillcolor=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
legend=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
legendwidth=None,
line=None,
meta=None,
metasrc=None,
name=None,
ncontours=None,
opacity=None,
reversescale=None,
showlegend=None,
showscale=None,
stream=None,
text=None,
textsrc=None,
transpose=None,
uid=None,
uirevision=None,
visible=None,
xaxis=None,
yaxis=None,
z=None,
zauto=None,
zmax=None,
zmid=None,
zmin=None,
zorder=None,
zsrc=None,
**kwargs,
):
"""
Construct a new Contourcarpet object
Plots contours on either the first carpet axis or the carpet
axis with a matching `carpet` attribute. Data `z` is
interpreted as matching that of the corresponding carpet axis.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Contourcarpet`
a
Sets the x coordinates.
a0
Alternate to `x`. Builds a linear space of x
coordinates. Use with `dx` where `x0` is the starting
coordinate and `dx` the step.
asrc
Sets the source reference on Chart Studio Cloud for
`a`.
atype
If "array", the heatmap's x coordinates are given by
"x" (the default behavior when `x` is provided). If
"scaled", the heatmap's x coordinates are given by "x0"
and "dx" (the default behavior when `x` is not
provided).
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color`
array are all positive, all negative or mixed.
autocontour
Determines whether or not the contour level attributes
are picked by an algorithm. If True, the number of
contour levels can be set in `ncontours`. If False, set
the contour level attributes in `contours`.
b
Sets the y coordinates.
b0
Alternate to `y`. Builds a linear space of y
coordinates. Use with `dy` where `y0` is the starting
coordinate and `dy` the step.
bsrc
Sets the source reference on Chart Studio Cloud for
`b`.
btype
If "array", the heatmap's y coordinates are given by
"y" (the default behavior when `y` is provided) If
"scaled", the heatmap's y coordinates are given by "y0"
and "dy" (the default behavior when `y` is not
provided)
carpet
The `carpet` of the carpet axes on which this contour
trace lies
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.contourcarpet.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use `zmin` and `zmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Blackbody,Bluered,Blues,C
ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
contours
:class:`plotly.graph_objects.contourcarpet.Contours`
instance or dict with compatible properties
customdata
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note
that "scatter" traces also append customdata items in
the markers DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
da
Sets the x coordinate step. See `x0` for more info.
db
Sets the y coordinate step. See `y0` for more info.
fillcolor
Sets the fill color if `contours.type` is "constraint".
Defaults to a half-transparent variant of the line
color, marker color, or marker line color, whichever is
available.
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids provide object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this trace. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.contourcarpet.Legendgroupt
itle` instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
line
:class:`plotly.graph_objects.contourcarpet.Line`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`,
`rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
ncontours
Sets the maximum number of contour levels. The actual
number of contours will be chosen automatically to be
less than or equal to the value of `ncontours`. Has an
effect only if `autocontour` is True or if
`contours.size` is missing.
opacity
Sets the opacity of the trace.
reversescale
Reverses the color mapping if true. If true, `zmin`
will correspond to the last color in the array and
`zmax` will correspond to the first color.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showscale
Determines whether or not a colorbar is displayed for
this trace.
stream
:class:`plotly.graph_objects.contourcarpet.Stream`
instance or dict with compatible properties
text
Sets the text elements associated with each z value.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
transpose
Transposes the z data.
uid
Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
z
Sets the z data.
zauto
Determines whether or not the color domain is computed
with respect to the input data (here in `z`) or the
bounds set in `zmin` and `zmax`. Defaults to `false`
when `zmin` and `zmax` are set by the user.
zmax
Sets the upper bound of the color domain. Value should
have the same units as in `z` and if set, `zmin` must
be set as well.
zmid
Sets the mid-point of the color domain by scaling
`zmin` and/or `zmax` to be equidistant to this point.
Value should have the same units as in `z`. Has no
effect when `zauto` is `false`.
zmin
Sets the lower bound of the color domain. Value should
have the same units as in `z` and if set, `zmax` must
be set as well.
zorder
Sets the layer on which this trace is displayed,
relative to other SVG traces on the same subplot. SVG
traces with higher `zorder` appear in front of those
with lower `zorder`.
zsrc
Sets the source reference on Chart Studio Cloud for
`z`.
Returns
-------
Contourcarpet
"""
super().__init__("contourcarpet")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.Contourcarpet
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Contourcarpet`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("a", arg, a)
self._set_property("a0", arg, a0)
self._set_property("asrc", arg, asrc)
self._set_property("atype", arg, atype)
self._set_property("autocolorscale", arg, autocolorscale)
self._set_property("autocontour", arg, autocontour)
self._set_property("b", arg, b)
self._set_property("b0", arg, b0)
self._set_property("bsrc", arg, bsrc)
self._set_property("btype", arg, btype)
self._set_property("carpet", arg, carpet)
self._set_property("coloraxis", arg, coloraxis)
self._set_property("colorbar", arg, colorbar)
self._set_property("colorscale", arg, colorscale)
self._set_property("contours", arg, contours)
self._set_property("customdata", arg, customdata)
self._set_property("customdatasrc", arg, customdatasrc)
self._set_property("da", arg, da)
self._set_property("db", arg, db)
self._set_property("fillcolor", arg, fillcolor)
self._set_property("hovertext", arg, hovertext)
self._set_property("hovertextsrc", arg, hovertextsrc)
self._set_property("ids", arg, ids)
self._set_property("idssrc", arg, idssrc)
self._set_property("legend", arg, legend)
self._set_property("legendgroup", arg, legendgroup)
self._set_property("legendgrouptitle", arg, legendgrouptitle)
self._set_property("legendrank", arg, legendrank)
self._set_property("legendwidth", arg, legendwidth)
self._set_property("line", arg, line)
self._set_property("meta", arg, meta)
self._set_property("metasrc", arg, metasrc)
self._set_property("name", arg, name)
self._set_property("ncontours", arg, ncontours)
self._set_property("opacity", arg, opacity)
self._set_property("reversescale", arg, reversescale)
self._set_property("showlegend", arg, showlegend)
self._set_property("showscale", arg, showscale)
self._set_property("stream", arg, stream)
self._set_property("text", arg, text)
self._set_property("textsrc", arg, textsrc)
self._set_property("transpose", arg, transpose)
self._set_property("uid", arg, uid)
self._set_property("uirevision", arg, uirevision)
self._set_property("visible", arg, visible)
self._set_property("xaxis", arg, xaxis)
self._set_property("yaxis", arg, yaxis)
self._set_property("z", arg, z)
self._set_property("zauto", arg, zauto)
self._set_property("zmax", arg, zmax)
self._set_property("zmid", arg, zmid)
self._set_property("zmin", arg, zmin)
self._set_property("zorder", arg, zorder)
self._set_property("zsrc", arg, zsrc)
self._props["type"] = "contourcarpet"
arg.pop("type", None)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Contourcarpet
|
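A brief usage sketch for the Contourcarpet record above: a contourcarpet trace only renders against a carpet trace whose `carpet` attribute matches its own. The axis arrays and z values below are made up purely for illustration.

```python
import plotly.graph_objects as go

# Minimal sketch: a carpet axis plus a matching contourcarpet trace.
# All numbers are illustrative, not taken from the record above.
fig = go.Figure()
fig.add_trace(go.Carpet(
    carpet="c1",
    a=[0, 0, 0, 1, 1, 1, 2, 2, 2],
    b=[1, 2, 3, 1, 2, 3, 1, 2, 3],
    y=[1.0, 2.2, 3.0, 1.5, 2.7, 3.5, 1.7, 2.9, 3.7],
))
fig.add_trace(go.Contourcarpet(
    carpet="c1",  # must match the carpet trace above
    a=[0, 0, 0, 1, 1, 1, 2, 2, 2],
    b=[1, 2, 3, 1, 2, 3, 1, 2, 3],
    z=[1.0, 1.96, 2.56, 6.25, 7.29, 9.0, 36.0, 42.25, 49.0],
    colorscale="Viridis",
))
fig.show()
```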
python
|
ray-project__ray
|
python/ray/_common/tests/test_utils.py
|
{
"start": 661,
"end": 2265
}
|
class ____:
"""Tests for the get_or_create_event_loop utility function."""
def test_existing_event_loop(self):
# With running event loop
expect_loop = asyncio.new_event_loop()
expect_loop.set_debug(True)
asyncio.set_event_loop(expect_loop)
with warnings.catch_warnings():
# Assert no deprecation warnings raised for python>=3.10.
warnings.simplefilter("error")
actual_loop = get_or_create_event_loop()
assert actual_loop == expect_loop, "Loop should not be recreated."
def test_new_event_loop(self):
with warnings.catch_warnings():
# Assert no deprecation warnings raised for python>=3.10.
warnings.simplefilter("error")
loop = get_or_create_event_loop()
assert loop is not None, "new event loop should be created."
@pytest.mark.asyncio
async def test_run_background_task():
"""Test the run_background_task utility function."""
result = {}
async def co():
result["start"] = 1
await asyncio.sleep(0)
result["end"] = 1
run_background_task(co())
# Background task is running.
assert len(_BACKGROUND_TASKS) == 1
# co executed.
await asyncio.sleep(0)
# await asyncio.sleep(0) from co is reached.
await asyncio.sleep(0)
# co finished and callback called.
await asyncio.sleep(0)
# The task should be removed from the set once it finishes.
assert len(_BACKGROUND_TASKS) == 0
assert result.get("start") == 1
assert result.get("end") == 1
|
TestGetOrCreateEventLoop
|
python
|
walkccc__LeetCode
|
solutions/309. Best Time to Buy and Sell Stock with Cooldown/309.py
|
{
"start": 0,
"end": 267
}
|
class ____:
def maxProfit(self, prices: list[int]) -> int:
sell = 0
hold = -math.inf
prev = 0
for price in prices:
cache = sell
sell = max(sell, hold + price)
hold = max(hold, prev - price)
prev = cache
return sell
|
Solution
|
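A quick sanity check of the sell/hold/prev recurrence in the record above, assuming the class is restored to its target name `Solution` and that `math` is imported as in the original file:

```python
# [1, 2, 3, 0, 2]: buy@1, sell@2, cooldown, buy@0, sell@2 -> profit 3
assert Solution().maxProfit([1, 2, 3, 0, 2]) == 3
# Empty input: the loop never runs and sell stays 0.
assert Solution().maxProfit([]) == 0
```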
python
|
numpy__numpy
|
benchmarks/benchmarks/bench_ma.py
|
{
"start": 52,
"end": 394
}
|
class ____(Benchmark):
def setup(self):
self.l100 = range(100)
self.t100 = ([True] * 100)
def time_masked_array(self):
np.ma.masked_array()
def time_masked_array_l100(self):
np.ma.masked_array(self.l100)
def time_masked_array_l100_t100(self):
np.ma.masked_array(self.l100, self.t100)
|
MA
|
python
|
astropy__astropy
|
astropy/config/paths.py
|
{
"start": 8122,
"end": 10004
}
|
class ____(_SetTempPath):
"""
Context manager to set a temporary path for the Astropy config, primarily
for use with testing.
If the path set by this context manager does not already exist it will be
created, if possible.
This may also be used as a decorator on a function to set the config path
just within that function.
Parameters
----------
path : str, optional
The directory (which must exist) in which to find the Astropy config
files, or create them if they do not already exist. If None, this
restores the config path to the user's default config path as returned
by `get_config_dir` as though this context manager were not in effect
(this is useful for testing). In this case the ``delete`` argument is
always ignored.
delete : bool, optional
If True, cleans up the temporary directory after exiting the temp
context (default: False).
"""
_directory_type = "config"
_directory_env_var = "XDG_CONFIG_HOME"
def __enter__(self) -> str:
# Special case for the config case, where we need to reset all the
# cached config objects. We do keep the cache, since some of it
# may have been set programmatically rather than be stored in the
# config file (e.g., iers.conf.auto_download=False for our tests).
from .configuration import _cfgobjs
self._cfgobjs_copy = _cfgobjs.copy()
_cfgobjs.clear()
return super().__enter__()
def __exit__(
self,
type: type[BaseException] | None,
value: BaseException | None,
tb: TracebackType | None,
) -> None:
from .configuration import _cfgobjs
_cfgobjs.clear()
_cfgobjs.update(self._cfgobjs_copy)
del self._cfgobjs_copy
super().__exit__(type, value, tb)
|
set_temp_config
|
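The set_temp_config record above is typically used as a context manager in tests; a minimal sketch (the temporary directory is arbitrary and the prints only show the switch):

```python
import tempfile

from astropy.config.paths import get_config_dir, set_temp_config

with tempfile.TemporaryDirectory() as tmpdir:
    # Point Astropy at a throwaway config directory for the duration of the block.
    with set_temp_config(tmpdir):
        print("config dir inside the block:", get_config_dir())
    print("config dir after the block:", get_config_dir())
```

Per the class docstring, it can also be applied as a decorator on a test function.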
python
|
scikit-learn__scikit-learn
|
sklearn/tests/metadata_routing_common.py
|
{
"start": 8103,
"end": 11019
}
|
class ____(ClassifierMixin, BaseEstimator):
"""A classifier consuming metadata.
Parameters
----------
registry : list, default=None
If a list, the estimator will append itself to the list in order to have
a reference to the estimator later on. Since that reference is not
required in all tests, registration can be skipped by leaving this value
as None.
alpha : float, default=0
This parameter is only used to test the ``*SearchCV`` objects, and
doesn't do anything.
"""
def __init__(self, registry=None, alpha=0.0):
self.alpha = alpha
self.registry = registry
def partial_fit(
self, X, y, classes=None, sample_weight="default", metadata="default"
):
if self.registry is not None:
self.registry.append(self)
record_metadata_not_default(
self, sample_weight=sample_weight, metadata=metadata
)
_check_partial_fit_first_call(self, classes)
return self
def fit(self, X, y, sample_weight="default", metadata="default"):
if self.registry is not None:
self.registry.append(self)
record_metadata_not_default(
self, sample_weight=sample_weight, metadata=metadata
)
self.classes_ = np.unique(y)
self.coef_ = np.ones_like(X)
return self
def predict(self, X, sample_weight="default", metadata="default"):
record_metadata_not_default(
self, sample_weight=sample_weight, metadata=metadata
)
y_score = np.empty(shape=(len(X),), dtype="int8")
y_score[len(X) // 2 :] = 0
y_score[: len(X) // 2] = 1
return y_score
def predict_proba(self, X, sample_weight="default", metadata="default"):
record_metadata_not_default(
self, sample_weight=sample_weight, metadata=metadata
)
y_proba = np.empty(shape=(len(X), len(self.classes_)), dtype=np.float32)
# each row sums up to 1.0:
y_proba[:] = np.random.dirichlet(alpha=np.ones(len(self.classes_)), size=len(X))
return y_proba
def predict_log_proba(self, X, sample_weight="default", metadata="default"):
record_metadata_not_default(
self, sample_weight=sample_weight, metadata=metadata
)
return self.predict_proba(X)
def decision_function(self, X, sample_weight="default", metadata="default"):
record_metadata_not_default(
self, sample_weight=sample_weight, metadata=metadata
)
y_score = np.empty(shape=(len(X),))
y_score[len(X) // 2 :] = 0
y_score[: len(X) // 2] = 1
return y_score
def score(self, X, y, sample_weight="default", metadata="default"):
record_metadata_not_default(
self, sample_weight=sample_weight, metadata=metadata
)
return 1
|
ConsumingClassifier
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_9/queues.py
|
{
"start": 45997,
"end": 49217
}
|
class ____(Response):
"""
Response of queues.get_queue_metrics endpoint.
:param queues: List of the requested queues with their metrics. If no queue ids
were requested then 'all' queue is returned with the metrics averaged across
all the company queues.
:type queues: Sequence[QueueMetrics]
"""
_service = "queues"
_action = "get_queue_metrics"
_version = "2.9"
_schema = {
"definitions": {
"queue_metrics": {
"properties": {
"avg_waiting_times": {
"description": "List of average waiting times for tasks in the queue. The points correspond to the timestamps in the dates list. If more than one value exists for the given interval then the maximum value is taken.",
"items": {"type": "number"},
"type": ["array", "null"],
},
"dates": {
"description": "List of timestamps (in seconds from epoch) in the acceding order. The timestamps are separated by the requested interval. Timestamps where no queue status change was recorded are omitted.",
"items": {"type": "integer"},
"type": ["array", "null"],
},
"queue": {
"description": "ID of the queue",
"type": ["string", "null"],
},
"queue_lengths": {
"description": "List of tasks counts in the queue. The points correspond to the timestamps in the dates list. If more than one value exists for the given interval then the count that corresponds to the maximum average value is taken.",
"items": {"type": "integer"},
"type": ["array", "null"],
},
},
"type": "object",
}
},
"properties": {
"queues": {
"description": "List of the requested queues with their metrics. If no queue ids were requested then 'all' queue is returned with the metrics averaged accross all the company queues.",
"items": {"$ref": "#/definitions/queue_metrics"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, queues: Optional[List[Any]] = None, **kwargs: Any) -> None:
super(GetQueueMetricsResponse, self).__init__(**kwargs)
self.queues = queues
@schema_property("queues")
def queues(self) -> Optional[List[Any]]:
return self._property_queues
@queues.setter
def queues(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_queues = None
return
self.assert_isinstance(value, "queues", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [QueueMetrics.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "queues", QueueMetrics, is_array=True)
self._property_queues = value
|
GetQueueMetricsResponse
|
python
|
boto__boto3
|
tests/unit/docs/test_attr.py
|
{
"start": 700,
"end": 3335
}
|
class ____(BaseDocsTest):
def setUp(self):
super().setUp()
self.add_shape(
{
'NestedStruct': {
'type': 'structure',
'members': {
'NestedStrAttr': {
'shape': 'String',
'documentation': 'Documents a nested string attribute',
}
},
}
}
)
self.add_shape(
{
'ResourceShape': {
'type': 'structure',
'members': {
'StringAttr': {
'shape': 'String',
'documentation': 'Documents a string attribute',
},
'NestedAttr': {
'shape': 'NestedStruct',
'documentation': 'Documents a nested attribute',
},
},
}
}
)
self.setup_client_and_resource()
self.event_emitter = HierarchicalEmitter()
self.service_name = 'myservice'
self.resource_name = 'myresource'
self.service_model = self.client.meta.service_model
def test_document_attr_scalar(self):
shape_model = self.service_model.shape_for('ResourceShape')
attr_name = 'StringAttr'
document_attribute(
self.doc_structure,
self.service_name,
self.resource_name,
attr_name,
self.event_emitter,
shape_model.members[attr_name],
)
self.assert_contains_lines_in_order(
[
'.. py:attribute:: StringAttr',
' - *(string) --* Documents a string attribute',
]
)
def test_document_attr_structure(self):
shape_model = self.service_model.shape_for('ResourceShape')
attr_name = 'NestedAttr'
document_attribute(
self.doc_structure,
self.service_name,
self.resource_name,
attr_name,
self.event_emitter,
shape_model.members[attr_name],
)
self.assert_contains_lines_in_order(
[
'.. py:attribute:: NestedAttr',
' - *(dict) --* Documents a nested attribute',
(
' - **NestedStrAttr** *(string) --* Documents a nested '
'string attribute'
),
]
)
|
TestDocumentAttribute
|
python
|
pytorch__pytorch
|
torch/distributed/fsdp/_fully_shard/_fsdp_common.py
|
{
"start": 2328,
"end": 2512
}
|
class ____(FSDPMeshInfo, DDPMeshInfo):
def __post_init__(self):
# Calls `FSDPMeshInfo` -> `DDPMeshInfo` -> `DataParallelMeshInfo`
super().__post_init__()
|
HSDPMeshInfo
|
python
|
walkccc__LeetCode
|
solutions/223. Rectangle Area/223.py
|
{
"start": 0,
"end": 329
}
|
class ____:
def computeArea(self,
A: int, B: int, C: int, D: int,
E: int, F: int, G: int, H: int) -> int:
x = min(C, G) - max(A, E) if max(A, E) < min(C, G) else 0
y = min(D, H) - max(B, F) if max(B, F) < min(D, H) else 0
return (C - A) * (D - B) + (G - E) * (H - F) - x * y
|
Solution
|
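A numeric check of the overlap handling in the record above, using the standard LeetCode 223 example and assuming the class keeps its target name `Solution`:

```python
# Rect 1: (-3, 0)-(3, 4) has area 24; rect 2: (0, -1)-(9, 2) has area 27.
# They overlap on a 3 x 2 strip, so the union covers 24 + 27 - 6 = 45.
assert Solution().computeArea(-3, 0, 3, 4, 0, -1, 9, 2) == 45
# Disjoint rectangles: nothing is subtracted, 16 + 1 = 17.
assert Solution().computeArea(-2, -2, 2, 2, 3, 3, 4, 4) == 17
```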
python
|
numba__numba
|
numba/core/types/misc.py
|
{
"start": 5372,
"end": 6863
}
|
class ____(Type):
"""
Type class for optional types, i.e. union { some type, None }
"""
def __init__(self, typ):
assert not isinstance(typ, (Optional, NoneType))
typ = unliteral(typ)
self.type = typ
name = "OptionalType(%s)" % self.type
super(Optional, self).__init__(name)
@property
def key(self):
return self.type
def can_convert_to(self, typingctx, other):
if isinstance(other, Optional):
return typingctx.can_convert(self.type, other.type)
else:
conv = typingctx.can_convert(self.type, other)
if conv is not None:
return max(conv, Conversion.safe)
def can_convert_from(self, typingctx, other):
if isinstance(other, NoneType):
return Conversion.promote
elif isinstance(other, Optional):
return typingctx.can_convert(other.type, self.type)
else:
conv = typingctx.can_convert(other, self.type)
if conv is not None:
return max(conv, Conversion.promote)
def unify(self, typingctx, other):
if isinstance(other, Optional):
unified = typingctx.unify_pairs(self.type, other.type)
else:
unified = typingctx.unify_pairs(self.type, other)
if unified is not None:
if isinstance(unified, Optional):
return unified
else:
return Optional(unified)
|
Optional
|
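For the numba Optional record above, the type is normally built through `numba.core.types`; a small sketch of constructing one and inspecting its payload type:

```python
from numba.core import types

opt = types.Optional(types.int64)
print(opt)                     # OptionalType(int64)
print(opt.key == types.int64)  # True: key exposes the wrapped type
```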
python
|
numpy__numpy
|
tools/swig/test/testVector.py
|
{
"start": 12482,
"end": 12747
}
|
class ____(VectorTestCase):
def __init__(self, methodName="runTest"):
VectorTestCase.__init__(self, methodName)
self.typeStr = "ulong"
self.typeCode = "L"
######################################################################
|
ulongTestCase
|
python
|
Textualize__textual
|
src/textual/demo/game.py
|
{
"start": 5995,
"end": 7910
}
|
class ____(containers.Vertical):
"""An individual tile in the puzzle.
A Tile is a container with a static inside it.
The static contains the code (as a Rich Syntax object), scrolled so the
relevant portion is visible.
"""
DEFAULT_CSS = """
Tile {
position: absolute;
Static {
width: auto;
height: auto;
&:hover { tint: $primary 30%; }
}
&#blank { visibility: hidden; }
}
"""
position: reactive[Offset] = reactive(Offset)
def __init__(
self,
renderable: ConsoleRenderable,
tile: int | None,
size: Size,
position: Offset,
) -> None:
self.renderable = renderable
self.tile = tile
self.tile_size = size
self.start_position = position
super().__init__(id="blank" if tile is None else f"tile{self.tile}")
self.set_reactive(Tile.position, position)
def compose(self) -> ComposeResult:
static = Static(
self.renderable,
classes="tile",
name="blank" if self.tile is None else str(self.tile),
)
assert self.parent is not None
static.styles.width = self.parent.styles.width
static.styles.height = self.parent.styles.height
yield static
def on_mount(self) -> None:
if self.tile is not None:
width, height = self.tile_size
self.styles.width = width
self.styles.height = height
column, row = self.position
self.set_scroll(column * width, row * height)
self.offset = self.position * self.tile_size
def watch_position(self, position: Offset) -> None:
"""The 'position' is in tile coordinate.
When it changes we animate it to the cell coordinates."""
self.animate("offset", position * self.tile_size, duration=0.2)
|
Tile
|
python
|
getsentry__sentry
|
src/sentry/integrations/vercel/client.py
|
{
"start": 266,
"end": 348
}
|
class ____(TypedDict):
limit: int
until: NotRequired[str | None]
|
_ParamsDict
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/errors.py
|
{
"start": 21002,
"end": 21470
}
|
class ____(DagsterError):
"""Indicates an error while attempting to launch a backfill."""
def __init__(self, *args, **kwargs):
from dagster._utils.error import SerializableErrorInfo
self.serializable_error_info = check.opt_inst_param(
kwargs.pop("serializable_error_info", None),
"serializable_error_info",
SerializableErrorInfo,
)
super().__init__(*args, **kwargs)
|
DagsterBackfillFailedError
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/assignment8.py
|
{
"start": 144,
"end": 619
}
|
class ____:
@overload
def bar(self, obj: None) -> object: ...
@overload
def bar(self, obj: object) -> Any: ...
def bar(self, obj: object | None) -> Any:
pass
@staticmethod
def baz():
return 3
_T = TypeVar("_T")
my_obj: object
my_obj = None
my_obj = os
my_obj = Foo
my_obj = Foo()
my_obj = Foo.bar
my_obj = Foo.baz
my_obj = ()
my_obj = lambda x: x
my_obj = _T
# This should generate an error because a is unbound.
my_obj = a
|
Foo
|
python
|
facebookresearch__faiss
|
tests/test_clone.py
|
{
"start": 297,
"end": 2745
}
|
class ____(unittest.TestCase):
"""
Test clone_index for various index combinations.
"""
def do_test_clone(self, factory, with_ids=False):
"""
Verify that cloning works for a given index type
"""
d = 32
ds = datasets.SyntheticDataset(d, 1000, 2000, 10)
index1 = faiss.index_factory(d, factory)
index1.train(ds.get_train())
if with_ids:
index1.add_with_ids(ds.get_database(),
np.arange(ds.nb).astype("int64"))
else:
index1.add(ds.get_database())
k = 5
Dref1, Iref1 = index1.search(ds.get_queries(), k)
index2 = faiss.clone_index(index1)
self.assertEqual(type(index1), type(index2))
index1 = None
Dref2, Iref2 = index2.search(ds.get_queries(), k)
np.testing.assert_array_equal(Dref1, Dref2)
np.testing.assert_array_equal(Iref1, Iref2)
def test_RFlat(self):
self.do_test_clone("SQ4,RFlat")
def test_Refine(self):
self.do_test_clone("SQ4,Refine(SQ8)")
def test_IVF(self):
self.do_test_clone("IVF16,Flat")
def test_PCA(self):
self.do_test_clone("PCA8,Flat")
def test_IDMap(self):
self.do_test_clone("IVF16,Flat,IDMap", with_ids=True)
def test_IDMap2(self):
self.do_test_clone("IVF16,Flat,IDMap2", with_ids=True)
def test_NSGPQ(self):
self.do_test_clone("NSG32,Flat")
def test_IVFAdditiveQuantizer(self):
self.do_test_clone("IVF16,LSQ5x6_Nqint8")
self.do_test_clone("IVF16,RQ5x6_Nqint8")
self.do_test_clone("IVF16,PLSQ4x3x5_Nqint8")
self.do_test_clone("IVF16,PRQ4x3x5_Nqint8")
def test_IVFAdditiveQuantizerFastScan(self):
self.do_test_clone("IVF16,LSQ3x4fs_32_Nlsq2x4")
self.do_test_clone("IVF16,RQ3x4fs_32_Nlsq2x4")
self.do_test_clone("IVF16,PLSQ2x3x4fs_Nlsq2x4")
self.do_test_clone("IVF16,PRQ2x3x4fs_Nrq2x4")
def test_AdditiveQuantizer(self):
self.do_test_clone("LSQ5x6_Nqint8")
self.do_test_clone("RQ5x6_Nqint8")
self.do_test_clone("PLSQ4x3x5_Nqint8")
self.do_test_clone("PRQ4x3x5_Nqint8")
def test_AdditiveQuantizerFastScan(self):
self.do_test_clone("LSQ3x4fs_32_Nlsq2x4")
self.do_test_clone("RQ3x4fs_32_Nlsq2x4")
self.do_test_clone("PLSQ2x3x4fs_Nlsq2x4")
self.do_test_clone("PRQ2x3x4fs_Nrq2x4")
|
TestClone
|
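Outside the test harness above, `faiss.clone_index` simply produces an independent copy of a trained index; a minimal sketch with a flat index and random vectors:

```python
import faiss
import numpy as np

d = 32
rng = np.random.default_rng(0)
xb = rng.random((1000, d), dtype=np.float32)
xq = rng.random((5, d), dtype=np.float32)

index = faiss.IndexFlatL2(d)
index.add(xb)

clone = faiss.clone_index(index)  # deep copy; mutating one leaves the other untouched
D1, I1 = index.search(xq, 5)
D2, I2 = clone.search(xq, 5)
np.testing.assert_array_equal(I1, I2)
```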
python
|
eventlet__eventlet
|
tests/db_pool_test.py
|
{
"start": 15151,
"end": 15648
}
|
class ____(MysqlConnectionPool, RawConnectionPool, tests.LimitedTestCase):
__test__ = True
def postgres_requirement(_f):
if psycopg2 is None:
print("Skipping postgres tests, psycopg2 not importable")
return False
try:
auth = tests.get_database_auth()['psycopg2'].copy()
psycopg2.connect(**auth)
return True
except psycopg2.OperationalError:
print("Skipping postgres tests, error when connecting")
return False
|
Test02MysqlRaw
|
python
|
kamyu104__LeetCode-Solutions
|
Python/best-time-to-buy-and-sell-stock-iv.py
|
{
"start": 3208,
"end": 4123
}
|
class ____(object):
def maxProfit(self, k, prices):
"""
:type k: int
:type prices: List[int]
:rtype: int
"""
def maxAtMostNPairsProfit(prices):
profit = 0
for i in xrange(len(prices) - 1):
profit += max(0, prices[i + 1] - prices[i])
return profit
def maxAtMostKPairsProfit(prices, k):
max_buy = [float("-inf") for _ in xrange(k + 1)]
max_sell = [0 for _ in xrange(k + 1)]
for i in xrange(len(prices)):
for j in xrange(1, k + 1):
max_buy[j] = max(max_buy[j], max_sell[j-1] - prices[i])
max_sell[j] = max(max_sell[j], max_buy[j] + prices[i])
return max_sell[k]
if k >= len(prices) // 2:
return maxAtMostNPairsProfit(prices)
return maxAtMostKPairsProfit(prices, k)
|
Solution2
|
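The record above targets Python 2 (note `xrange`); under that interpreter, and with the class keeping its target name `Solution2`, the classic LeetCode 188 check would be:

```python
# k = 2, prices = [3, 2, 6, 5, 0, 3]:
# buy at 2, sell at 6 (+4), then buy at 0, sell at 3 (+3) -> total 7.
assert Solution2().maxProfit(2, [3, 2, 6, 5, 0, 3]) == 7
# With k >= len(prices) // 2 the greedy branch simply sums every upward move.
assert Solution2().maxProfit(5, [1, 2, 3]) == 2
```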
python
|
scrapy__scrapy
|
tests/test_engine.py
|
{
"start": 3431,
"end": 6802
}
|
class ____:
"""A class to run the crawler and keep track of events occurred"""
def __init__(self, spider_class):
self.respplug = []
self.reqplug = []
self.reqdropped = []
self.reqreached = []
self.itemerror = []
self.itemresp = []
self.headers = {}
self.bytes = defaultdict(list)
self.signals_caught = {}
self.spider_class = spider_class
async def run(self, mockserver: MockServer) -> None:
self.mockserver = mockserver
start_urls = [
self.geturl("/static/"),
self.geturl("/redirect"),
self.geturl("/redirect"), # duplicate
self.geturl("/numbers"),
]
for name, signal in vars(signals).items():
if not name.startswith("_"):
dispatcher.connect(self.record_signal, signal)
self.crawler = get_crawler(self.spider_class)
self.crawler.signals.connect(self.item_scraped, signals.item_scraped)
self.crawler.signals.connect(self.item_error, signals.item_error)
self.crawler.signals.connect(self.headers_received, signals.headers_received)
self.crawler.signals.connect(self.bytes_received, signals.bytes_received)
self.crawler.signals.connect(self.request_scheduled, signals.request_scheduled)
self.crawler.signals.connect(self.request_dropped, signals.request_dropped)
self.crawler.signals.connect(
self.request_reached, signals.request_reached_downloader
)
self.crawler.signals.connect(
self.response_downloaded, signals.response_downloaded
)
self.crawler.crawl(start_urls=start_urls)
self.deferred: defer.Deferred[None] = defer.Deferred()
dispatcher.connect(self.stop, signals.engine_stopped)
await maybe_deferred_to_future(self.deferred)
async def stop(self):
for name, signal in vars(signals).items():
if not name.startswith("_"):
disconnect_all(signal)
self.deferred.callback(None)
await self.crawler.stop_async()
def geturl(self, path: str) -> str:
return self.mockserver.url(path)
def getpath(self, url):
u = urlparse(url)
return u.path
def item_error(self, item, response, spider, failure):
self.itemerror.append((item, response, spider, failure))
def item_scraped(self, item, spider, response):
self.itemresp.append((item, response))
def headers_received(self, headers, body_length, request, spider):
self.headers[request] = headers
def bytes_received(self, data, request, spider):
self.bytes[request].append(data)
def request_scheduled(self, request, spider):
self.reqplug.append((request, spider))
def request_reached(self, request, spider):
self.reqreached.append((request, spider))
def request_dropped(self, request, spider):
self.reqdropped.append((request, spider))
def response_downloaded(self, response, spider):
self.respplug.append((response, spider))
def record_signal(self, *args, **kwargs):
"""Record a signal and its parameters"""
signalargs = kwargs.copy()
sig = signalargs.pop("signal")
signalargs.pop("sender", None)
self.signals_caught[sig] = signalargs
|
CrawlerRun
|
python
|
openai__gym
|
gym/spaces/discrete.py
|
{
"start": 163,
"end": 4476
}
|
class ____(Space[int]):
r"""A space consisting of finitely many elements.
This class represents a finite subset of integers, more specifically a set of the form :math:`\{ a, a+1, \dots, a+n-1 \}`.
Example::
>>> Discrete(2) # {0, 1}
>>> Discrete(3, start=-1) # {-1, 0, 1}
"""
def __init__(
self,
n: int,
seed: Optional[Union[int, np.random.Generator]] = None,
start: int = 0,
):
r"""Constructor of :class:`Discrete` space.
This will construct the space :math:`\{\text{start}, ..., \text{start} + n - 1\}`.
Args:
n (int): The number of elements of this space.
seed: Optionally, you can use this argument to seed the RNG that is used to sample from the ``Dict`` space.
start (int): The smallest element of this space.
"""
assert isinstance(n, (int, np.integer))
assert n > 0, "n (counts) have to be positive"
assert isinstance(start, (int, np.integer))
self.n = int(n)
self.start = int(start)
super().__init__((), np.int64, seed)
@property
def is_np_flattenable(self):
"""Checks whether this space can be flattened to a :class:`spaces.Box`."""
return True
def sample(self, mask: Optional[np.ndarray] = None) -> int:
"""Generates a single random sample from this space.
A sample will be chosen uniformly at random with the mask if provided
Args:
mask: An optional mask for if an action can be selected.
Expected `np.ndarray` of shape `(n,)` and dtype `np.int8` where `1` represents valid actions and `0` invalid / infeasible actions.
If there are no possible actions (i.e. `np.all(mask == 0)`) then `space.start` will be returned.
Returns:
A sampled integer from the space
"""
if mask is not None:
assert isinstance(
mask, np.ndarray
), f"The expected type of the mask is np.ndarray, actual type: {type(mask)}"
assert (
mask.dtype == np.int8
), f"The expected dtype of the mask is np.int8, actual dtype: {mask.dtype}"
assert mask.shape == (
self.n,
), f"The expected shape of the mask is {(self.n,)}, actual shape: {mask.shape}"
valid_action_mask = mask == 1
assert np.all(
np.logical_or(mask == 0, valid_action_mask)
), f"All values of a mask should be 0 or 1, actual values: {mask}"
if np.any(valid_action_mask):
return int(
self.start + self.np_random.choice(np.where(valid_action_mask)[0])
)
else:
return self.start
return int(self.start + self.np_random.integers(self.n))
def contains(self, x) -> bool:
"""Return boolean specifying if x is a valid member of this space."""
if isinstance(x, int):
as_int = x
elif isinstance(x, (np.generic, np.ndarray)) and (
np.issubdtype(x.dtype, np.integer) and x.shape == ()
):
as_int = int(x) # type: ignore
else:
return False
return self.start <= as_int < self.start + self.n
def __repr__(self) -> str:
"""Gives a string representation of this space."""
if self.start != 0:
return f"Discrete({self.n}, start={self.start})"
return f"Discrete({self.n})"
def __eq__(self, other) -> bool:
"""Check whether ``other`` is equivalent to this instance."""
return (
isinstance(other, Discrete)
and self.n == other.n
and self.start == other.start
)
def __setstate__(self, state):
"""Used when loading a pickled space.
This method has to be implemented explicitly to allow for loading of legacy states.
Args:
state: The new state
"""
# Don't mutate the original state
state = dict(state)
# Allow for loading of legacy states.
# See https://github.com/openai/gym/pull/2470
if "start" not in state:
state["start"] = 0
super().__setstate__(state)
|
Discrete
|
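The Discrete record above documents masked sampling in detail; a small sketch of how the mask argument behaves (the seed and mask values are arbitrary):

```python
import numpy as np
from gym.spaces import Discrete

space = Discrete(3, start=-1, seed=42)  # the set {-1, 0, 1}
print(space.sample())                   # unmasked draw from {-1, 0, 1}

# Mask out everything except the last action; the dtype must be np.int8.
mask = np.array([0, 0, 1], dtype=np.int8)
assert space.sample(mask) == 1          # only start + 2 == 1 is selectable

# An all-zero mask falls back to space.start.
assert space.sample(np.zeros(3, dtype=np.int8)) == -1
```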
python
|
PyCQA__pylint
|
tests/functional/r/regression_02/regression_enum_1734.py
|
{
"start": 241,
"end": 653
}
|
class ____(Enum):
LOADED = "loaded", True
SETUP_ERROR = "setup_error", True
_recoverable: bool
def __new__(cls, value: str, recoverable: bool):
obj = object.__new__(cls)
obj._value_ = value
obj._recoverable = recoverable
return obj
@property
def recoverable(self) -> bool:
"""Get if the state is recoverable."""
return self._recoverable
|
Test
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-k8s/dagster_k8s/client.py
|
{
"start": 2494,
"end": 2856
}
|
class ____(Exception):
pass
WHITELISTED_TRANSIENT_K8S_STATUS_CODES = [
503, # Service unavailable
504, # Gateway timeout
500, # Internal server error
# typically not transient, but some k8s clusters raise it transiently: https://github.com/aws/containers-roadmap/issues/1810
401, # Authorization Failure
]
|
DagsterK8sJobStatusException
|
python
|
pyqtgraph__pyqtgraph
|
pyqtgraph/graphicsItems/CurvePoint.py
|
{
"start": 4034,
"end": 4689
}
|
class ____(CurvePoint):
"""Provides an arrow that points to any specific sample on a PlotCurveItem.
Provides properties that can be animated."""
def __init__(self, curve, index=0, pos=None, **opts):
CurvePoint.__init__(self, curve, index=index, pos=pos)
if opts.get('pxMode', True):
opts['pxMode'] = False
self.setFlags(self.flags() | self.GraphicsItemFlag.ItemIgnoresTransformations)
opts['angle'] = 0
self.arrow = ArrowItem.ArrowItem(**opts)
self.arrow.setParentItem(self)
def setStyle(self, **opts):
return self.arrow.setStyle(**opts)
|
CurveArrow
|
python
|
pdm-project__pdm
|
src/pdm/cli/commands/python.py
|
{
"start": 3568,
"end": 8216
}
|
class ____(BaseCommand):
"""Install a Python interpreter with PDM"""
arguments = (verbose_option,)
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument(
"version",
help="The Python version to install (e.g. cpython@3.10.3). If left empty, "
"highest cPython version that matches this platform/arch is installed. "
"If pyproject.toml with requires-python is available, this is considered as well.",
nargs="?",
)
parser.add_argument("--list", "-l", action="store_true", help="List all available Python versions")
parser.add_argument(
"--min",
action="store_true",
help="Use minimum instead of highest version for installation if `version` is left empty",
)
def handle(self, project: Project, options: Namespace) -> None:
if options.list:
for version in get_all_installable_python_versions(build_dir=False):
project.core.ui.echo(str(version))
return
version = options.version
if version is None:
match = project.get_best_matching_cpython_version(options.min)
if match is not None:
version = str(match)
if version is None:
raise PdmArgumentError("Please specify a Python version to be installed. E.g. cpython@3.10.3")
self.install_python(project, version)
@staticmethod
def install_python(project: Project, request: str) -> PythonInfo:
from pbs_installer import download, get_download_link, install_file
from pbs_installer._install import THIS_ARCH
from pdm.termui import logger
ui = project.core.ui
root = Path(project.config["python.install_root"]).expanduser()
implementation, _, version = request.rpartition("@")
implementation = implementation.lower() or "cpython"
version, _, arch = version.partition("-")
arch = "x86" if arch == "32" else (arch or THIS_ARCH)
ver, python_file = get_download_link(version, implementation=implementation, arch=arch, build_dir=False)
ver_str = str(ver)
spinner_msg = f"Downloading [success]{ver_str}[/]"
if ui.verbosity >= Verbosity.DETAIL:
download_url = python_file[0] if isinstance(python_file, (tuple, list)) else python_file
spinner_msg += f" {download_url}"
with ui.open_spinner(spinner_msg) as spinner:
destination = root / ver_str
logger.debug("Installing %s to %s", ver_str, destination)
env = BareEnvironment(project)
install_root = destination
if install_root.joinpath("install").exists():
install_root = install_root.joinpath("install")
interpreter = install_root / "bin" / "python3" if sys.platform != "win32" else destination / "python.exe"
if not destination.exists() or not interpreter.exists():
shutil.rmtree(destination, ignore_errors=True)
destination.mkdir(parents=True, exist_ok=True)
with tempfile.NamedTemporaryFile() as tf:
tf.close()
original_filename = download(python_file, tf.name, env.session)
spinner.update(f"Installing [success]{ver_str}[/]")
try:
install_file(tf.name, destination, original_filename)
except ModuleNotFoundError as e:
if "zstandard is required" in str(e):
raise InstallationError(
"zstandard is required to install this Python version. "
"Please install it with `pdm self add zstandard`."
) from None
if destination.joinpath("install").exists():
install_root = destination.joinpath("install")
interpreter = install_root / "bin" / "python3" if sys.platform != "win32" else install_root / "python.exe"
if not interpreter.exists():
raise InstallationError("Installation failed, please try again.")
python_info = PythonInfo.from_path(interpreter)
ui.echo(
f"[success]Successfully installed[/] {python_info.implementation}@{python_info.version}",
verbosity=Verbosity.NORMAL,
)
ui.echo(f"[info]Version:[/] {python_info.version}", verbosity=Verbosity.NORMAL)
ui.echo(f"[info]Executable:[/] {python_info.path}", verbosity=Verbosity.NORMAL)
return python_info
|
InstallCommand
|
python
|
walkccc__LeetCode
|
solutions/3545. Minimum Deletions for At Most K Distinct Characters/3545.py
|
{
"start": 0,
"end": 212
}
|
class ____:
def minDeletion(self, s: str, k: int) -> int:
count = collections.Counter(s)
if len(count) <= k:
return 0
freqs = sorted(count.values())
return sum(freqs[:len(freqs) - k])
|
Solution
|
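A short trace of the greedy in the record above, assuming the class keeps its target name `Solution` and that `collections` is imported as in the original file:

```python
# "abcd" has 4 distinct letters with frequency 1 each; keeping k=2 of them
# means deleting every occurrence of the 2 least frequent ones: 1 + 1 = 2.
assert Solution().minDeletion("abcd", 2) == 2
# Already at most k distinct characters: nothing to delete.
assert Solution().minDeletion("aabb", 2) == 0
```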
python
|
pypa__hatch
|
tests/config/test_model.py
|
{
"start": 9562,
"end": 17154
}
|
class ____:
def test_default(self, default_cache_dir, default_data_dir):
config = RootConfig({})
default_cache_directory = str(default_cache_dir)
default_data_directory = str(default_data_dir)
assert config.dirs.project == config.dirs.project == []
assert config.dirs.env == config.dirs.env == {}
assert config.dirs.python == config.dirs.python == "isolated"
assert config.dirs.cache == config.dirs.cache == default_cache_directory
assert config.dirs.data == config.dirs.data == default_data_directory
assert config.raw_data == {
"dirs": {
"project": [],
"env": {},
"python": "isolated",
"data": default_data_directory,
"cache": default_cache_directory,
},
}
def test_not_table(self, helpers):
config = RootConfig({"dirs": 9000})
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
dirs
must be a table"""
),
):
_ = config.dirs
def test_set_lazy_error(self, helpers):
config = RootConfig({})
config.dirs = 9000
assert config.raw_data == {"dirs": 9000}
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
dirs
must be a table"""
),
):
_ = config.dirs
def test_project(self):
config = RootConfig({"dirs": {"project": ["foo"]}})
assert config.dirs.project == ["foo"]
assert config.raw_data == {"dirs": {"project": ["foo"]}}
def test_project_not_array(self, helpers):
config = RootConfig({"dirs": {"project": 9000}})
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
dirs -> project
must be an array"""
),
):
_ = config.dirs.project
def test_project_entry_not_string(self, helpers):
config = RootConfig({"dirs": {"project": [9000]}})
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
dirs -> project -> 1
must be a string"""
),
):
_ = config.dirs.project
def test_project_set_lazy_error(self, helpers):
config = RootConfig({})
config.dirs.project = 9000
assert config.raw_data == {"dirs": {"project": 9000}}
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
dirs -> project
must be an array"""
),
):
_ = config.dirs.project
def test_env(self):
config = RootConfig({"dirs": {"env": {"foo": "bar"}}})
assert config.dirs.env == {"foo": "bar"}
assert config.raw_data == {"dirs": {"env": {"foo": "bar"}}}
def test_env_not_table(self, helpers):
config = RootConfig({"dirs": {"env": 9000}})
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
dirs -> env
must be a table"""
),
):
_ = config.dirs.env
def test_env_value_not_string(self, helpers):
config = RootConfig({"dirs": {"env": {"foo": 9000}}})
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
dirs -> env -> foo
must be a string"""
),
):
_ = config.dirs.env
def test_env_set_lazy_error(self, helpers):
config = RootConfig({})
config.dirs.env = 9000
assert config.raw_data == {"dirs": {"env": 9000}}
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
dirs -> env
must be a table"""
),
):
_ = config.dirs.env
def test_python(self):
config = RootConfig({"dirs": {"python": "foo"}})
assert config.dirs.python == "foo"
assert config.raw_data == {"dirs": {"python": "foo"}}
def test_python_not_string(self, helpers):
config = RootConfig({"dirs": {"python": 9000}})
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
dirs -> python
must be a string"""
),
):
_ = config.dirs.python
def test_python_set_lazy_error(self, helpers):
config = RootConfig({})
config.dirs.python = 9000
assert config.raw_data == {"dirs": {"python": 9000}}
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
dirs -> python
must be a string"""
),
):
_ = config.dirs.python
def test_data(self):
config = RootConfig({"dirs": {"data": "foo"}})
assert config.dirs.data == "foo"
assert config.raw_data == {"dirs": {"data": "foo"}}
def test_data_not_string(self, helpers):
config = RootConfig({"dirs": {"data": 9000}})
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
dirs -> data
must be a string"""
),
):
_ = config.dirs.data
def test_data_set_lazy_error(self, helpers):
config = RootConfig({})
config.dirs.data = 9000
assert config.raw_data == {"dirs": {"data": 9000}}
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
dirs -> data
must be a string"""
),
):
_ = config.dirs.data
def test_cache(self):
config = RootConfig({"dirs": {"cache": "foo"}})
assert config.dirs.cache == "foo"
assert config.raw_data == {"dirs": {"cache": "foo"}}
def test_cache_not_string(self, helpers):
config = RootConfig({"dirs": {"cache": 9000}})
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
dirs -> cache
must be a string"""
),
):
_ = config.dirs.cache
def test_cache_set_lazy_error(self, helpers):
config = RootConfig({})
config.dirs.cache = 9000
assert config.raw_data == {"dirs": {"cache": 9000}}
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
dirs -> cache
must be a string"""
),
):
_ = config.dirs.cache
|
TestDirs
|
python
|
pytorch__pytorch
|
test/fx/test_fx_split.py
|
{
"start": 8144,
"end": 10675
}
|
class ____(TestCase):
class TestModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(3, 16, 3, stride=1, bias=True)
self.relu = torch.nn.ReLU()
def forward(self, x):
conv = self.conv(x)
conv = conv * 0.5
relu = self.relu(conv)
return relu
@staticmethod
def trace_and_tag(
module: torch.nn.Module, inputs: torch.Tensor, tags: list[str]
) -> tuple[torch.fx.GraphModule, dict[str, list[str]]]:
"""
Test simple gm consists of nodes with tag (only show call_module nodes here):
conv - tag: "red"
mul - tag: "blue"
relu - tag: "green"
At the beginning we have:
gm:
conv
mul
relu
split_gm = split_by_tags(gm, tags)
Then we have:
split_gm:
red:
conv
blue:
mul
green:
relu
"""
tag_node = defaultdict(list)
gm: torch.fx.GraphModule = torch.export.export(
module, (inputs,), strict=True
).module()
# Add tag to all nodes and build dictionary record tag to call_module nodes
for node in gm.graph.nodes:
if "conv" in node.name:
node.tag = tags[0]
tag_node[tags[0]].append(node.name)
elif "mul" in node.name:
node.tag = tags[1]
tag_node[tags[1]].append(node.name)
else:
node.tag = tags[2]
if node.op == "call_module":
tag_node[tags[2]].append(node.name)
return gm, tag_node
def test_split_by_tags(self) -> None:
tags = ["red", "blue", "green"]
module = TestSplitOutputType.TestModule()
inputs = torch.randn((1, 3, 224, 224))
gm, _ = TestSplitOutputType.trace_and_tag(module, inputs, tags)
split_gm, _ = split_by_tags(gm, tags, return_fqn_mapping=True)
gm_output = module(inputs)
split_gm_output = split_gm(inputs)
self.assertTrue(type(gm_output) is type(split_gm_output))
self.assertTrue(torch.equal(gm_output, split_gm_output))
if __name__ == "__main__":
raise RuntimeError(
"This test is not currently used and should be "
"enabled in discover_tests.py if required."
)
|
TestSplitOutputType
|
python
|
walkccc__LeetCode
|
solutions/677. Map Sum Pairs/677.py
|
{
"start": 101,
"end": 623
}
|
class ____:
def __init__(self):
self.root = TrieNode()
self.keyToVal = {}
def insert(self, key: str, val: int) -> None:
diff = val - self.keyToVal.get(key, 0)
node: TrieNode = self.root
for c in key:
node = node.children.setdefault(c, TrieNode())
node.sum += diff
self.keyToVal[key] = val
def sum(self, prefix: str) -> int:
node: TrieNode = self.root
for c in prefix:
if c not in node.children:
return 0
node = node.children[c]
return node.sum
|
MapSum
|
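Usage of the MapSum record above follows LeetCode 677. The `TrieNode` class sits outside the extracted span, so the sketch below supplies a hypothetical stand-in with the two attributes the record relies on (a children dict and a running sum):

```python
class TrieNode:
    # Hypothetical stand-in matching what MapSum expects.
    def __init__(self):
        self.children: dict = {}
        self.sum = 0

m = MapSum()
m.insert("apple", 3)
assert m.sum("ap") == 3
m.insert("app", 2)
assert m.sum("ap") == 5
m.insert("apple", 2)  # re-inserting a key overwrites its value
assert m.sum("ap") == 4
```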
python
|
pytorch__pytorch
|
torch/nn/utils/_expanded_weights/instance_norm_expanded_weights.py
|
{
"start": 365,
"end": 3772
}
|
class ____(torch.autograd.Function):
@staticmethod
# pyrefly: ignore [bad-override]
def forward(ctx, kwarg_names, _, *expanded_args_and_kwargs):
instance_norm = partial(torch.instance_norm, cudnn_enabled=True)
expanded_args, expanded_kwargs = standard_kwargs(
kwarg_names, expanded_args_and_kwargs
)
output = forward_helper(instance_norm, expanded_args, expanded_kwargs)
ctx.input = expanded_args[0]
ctx.running_mean, ctx.running_var = (
expanded_kwargs["running_mean"],
expanded_kwargs["running_var"],
)
ctx.weight, ctx.bias, ctx.eps = (
expanded_kwargs["weight"],
expanded_kwargs["bias"],
expanded_kwargs["eps"],
)
return output
@staticmethod
# pyrefly: ignore [bad-override]
def backward(ctx, grad_output):
input, running_mean, running_var = ctx.input, ctx.running_mean, ctx.running_var
weight, bias, eps = ctx.weight, ctx.bias, ctx.eps
results: list[torch.Tensor | None] = []
results.append(None) # for kwarg names
results.append(None) # for op reference
if input.requires_grad:
b = input.shape[0]
c = input.shape[1]
new_shape = (1, b * c, *input.shape[2:])
weight_ = unpack_expanded_weight_or_tensor(
weight, lambda orig_weight: orig_weight.repeat(b)
)
running_mean_ = running_mean.repeat(b) if running_mean is not None else None
running_var_ = running_var.repeat(b) if running_var is not None else None
input_reshaped = input.contiguous().view(new_shape)
grad_output_reshaped = grad_output.contiguous().view(new_shape)
mean = torch.mean(
input_reshaped, (0,) + tuple(range(2, input.dim())), False
)
var = torch.var(
input_reshaped,
(0,) + tuple(range(2, input.dim())),
keepdim=False,
unbiased=False,
)
rstd = 1 / torch.sqrt(var + eps)
# must use native batch norm since it supports all inputs. This may have used cuDNN or MIOpen during the forward but
# it didn't save the metadata, so we don't know during the backward
res = torch.ops.aten.native_batch_norm_backward(
grad_output_reshaped,
input_reshaped,
weight_,
running_mean_,
running_var_,
mean,
rstd,
True,
eps,
(True, False, False),
)
results.append(res[0].reshape(input.shape))
else:
results.append(None)
# weight and bias don't compute batched gradients; no other arguments are differentiable (2 are not saved from the forward)
results = results + [None] * 7
# set grad_sample field for weight and bias with per sample gradients
set_grad_sample_if_exists(
weight,
lambda _: torch.einsum(
"ni...->ni", F.instance_norm(input, eps=eps) * grad_output
),
)
set_grad_sample_if_exists(
bias, lambda _: torch.einsum("ni...->ni", grad_output)
)
return tuple(results)
|
InstanceNormPerSampleGrad
|
python
|
apache__airflow
|
airflow-core/src/airflow/api_fastapi/core_api/datamodels/connections.py
|
{
"start": 2529,
"end": 3146
}
|
class ____(BaseModel):
"""A class to store the behavior of each standard field of a Hook."""
hidden: Annotated[
bool,
Field(description="Flag if the form field should be hidden."),
] = False
title: Annotated[
str | None,
Field(
description="Label / title for the field that should be displayed, if re-labelling is needed. Use `None` to display standard title."
),
] = None
placeholder: Annotated[
str | None,
Field(description="Placeholder text that should be populated to the form."),
] = None
|
ConnectionHookFieldBehavior
|
python
|
google__jax
|
jax/_src/dispatch.py
|
{
"start": 3807,
"end": 6140
}
|
class ____(threading.local):
"""See docstring for effects.py module for the calling convention for tokens."""
# For each ordered effect, the token returned by the last dispatched
# computation, sharded over the devices in that computation.
current_tokens: dict[core.Effect, core.Token]
# For each device, the runtime token returned by the last dispatched
# computation on that device.
output_runtime_tokens: dict[Device, RuntimeToken]
def __init__(self):
self.current_tokens = {}
self.output_runtime_tokens = {}
def get_token_input(
self, eff: core.Effect, devices: list[Device]
) -> core.Token:
tok = self.current_tokens.get(eff, np.zeros(0, np.bool_))
if isinstance(tok, core.Token):
# The order of devices may change, so we need to reshard if necessary.
# TODO(yueshengys): This might still be buggy in a multi-process SPMD
# scenario. Revise the logic later. A distributed shutdown barrier inside
# the XLA program may be needed.
return api.device_put(
tok, NamedSharding(Mesh(devices, 'x'), PartitionSpec('x')))
# We only use replicated sharding for the first time when the token for the
# order effect hasn't been created.
s = GSPMDSharding.get_replicated(devices)
sharded_tok = core.Token(
pxla.shard_args(
[s], [None], [xc.ArrayCopySemantics.REUSE_INPUT], [tok]
)[0]
)
self.current_tokens[eff] = sharded_tok
return sharded_tok
def set_token_result(self, eff: core.Effect, token: core.Token):
self.current_tokens[eff] = token
def set_output_runtime_token(self, device: Device, token: RuntimeToken):
# We're free to clobber the previous output token because on each
# device we have a total ordering of computations. Only the token
# from the latest computation matters.
self.output_runtime_tokens[device] = token
def clear(self):
self.current_tokens = {}
self.output_runtime_tokens = {}
def block_until_ready(self):
for token in self.current_tokens.values():
token.block_until_ready()
for token in self.output_runtime_tokens.values():
token.block_until_ready()
self.clear()
runtime_tokens: RuntimeTokenSet = RuntimeTokenSet()
@atexit.register
def wait_for_tokens():
runtime_tokens.block_until_ready()
|
RuntimeTokenSet
|
python
|
fsspec__filesystem_spec
|
fsspec/caching.py
|
{
"start": 8215,
"end": 9816
}
|
class ____(BaseCache):
"""Cache which reads only when we get beyond a block of data
This is a much simpler version of BytesCache, and does not attempt to
fill holes in the cache or keep fragments alive. It is best suited to
many small reads in a sequential order (e.g., reading lines from a file).
"""
name = "readahead"
def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None:
super().__init__(blocksize, fetcher, size)
self.cache = b""
self.start = 0
self.end = 0
def _fetch(self, start: int | None, end: int | None) -> bytes:
if start is None:
start = 0
if end is None or end > self.size:
end = self.size
if start >= self.size or start >= end:
return b""
l = end - start
if start >= self.start and end <= self.end:
# cache hit
self.hit_count += 1
return self.cache[start - self.start : end - self.start]
elif self.start <= start < self.end:
# partial hit
self.miss_count += 1
part = self.cache[start - self.start :]
l -= len(part)
start = self.end
else:
# miss
self.miss_count += 1
part = b""
end = min(self.size, end + self.blocksize)
self.total_requested_bytes += end - start
self.cache = self.fetcher(start, end) # new block replaces old
self.start = start
self.end = self.start + len(self.cache)
return part + self.cache[:l]
|
ReadAheadCache
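A minimal sketch of how the cache above behaves, using an in-memory byte string as the backing store. The (start, end) -> bytes fetcher signature and the constructor arguments are taken from the class itself; `_fetch` is the method the record defines, and in fsspec it is normally driven by the file object rather than called directly.

from fsspec.caching import ReadAheadCache

data = bytes(range(256)) * 64   # 16 KiB of sample data
cache = ReadAheadCache(blocksize=1024, fetcher=lambda s, e: data[s:e], size=len(data))

first = cache._fetch(0, 10)     # miss: fetches a whole block ahead of the request
again = cache._fetch(10, 20)    # served from the block cached by the first call
assert first == data[:10] and again == data[10:20]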
|
python
|
getsentry__sentry
|
src/sentry/testutils/cases.py
|
{
"start": 80034,
"end": 85107
}
|
class ____(
TestCase,
BaseTestCase, # forcing this to explicitly inherit BaseTestCase addresses some type hint issues
):
def store_functions(
self,
functions,
project,
transaction=None,
extras=None,
timestamp=None,
):
if transaction is None:
transaction = load_data("transaction", timestamp=timestamp or before_now(minutes=10))
profile_context = transaction.setdefault("contexts", {}).setdefault("profile", {})
if profile_context.get("profile_id") is None:
profile_context["profile_id"] = uuid4().hex
profile_id = profile_context.get("profile_id")
self.store_event(transaction, project_id=project.id)
timestamp = transaction["timestamp"]
functions = [
{
**function,
"self_times_ns": list(map(int, function["self_times_ns"])),
"fingerprint": self.function_fingerprint(function),
}
for function in functions
]
functions_payload = {
"functions": functions,
# the transaction platform doesn't quite match the
# profile platform, but should be fine for tests
"platform": transaction["platform"],
"profile_id": profile_id,
"project_id": project.id,
"received": int(timezone.now().timestamp()),
"retention_days": 90,
"timestamp": int(timestamp),
"transaction_name": transaction["transaction"],
"materialization_version": 1,
}
if extras is not None:
functions_payload.update(extras)
response = requests.post(
settings.SENTRY_SNUBA + "/tests/entities/functions/insert",
json=[functions_payload],
)
assert response.status_code == 200
return {
"transaction": transaction,
"functions": functions,
}
def store_functions_chunk(
self,
functions,
project,
profiler_id=None,
extras=None,
timestamp=None,
):
if profiler_id is None:
profiler_id = uuid4().hex
# TODO: also write to chunks dataset
chunk_id = uuid4().hex
functions = [
{
**function,
"self_times_ns": list(map(int, function["self_times_ns"])),
"fingerprint": self.function_fingerprint(function),
}
for function in functions
]
timestamp = (timestamp or timezone.now()).timestamp()
max_duration = max(
duration for function in functions for duration in function["self_times_ns"]
)
start = timestamp
end = timestamp + max_duration / 1e9
functions_payload = {
"functions": functions,
"platform": "",
"transaction_name": "",
"profile_id": profiler_id,
"project_id": project.id,
"received": int(timestamp),
"retention_days": 90,
"timestamp": int(timestamp),
"start_timestamp": start,
"end_timestamp": end,
"profiling_type": "continuous",
"materialization_version": 1,
}
if extras is not None:
functions_payload.update(extras)
response = requests.post(
settings.SENTRY_SNUBA + "/tests/entities/functions/insert",
json=[functions_payload],
)
assert response.status_code == 200
return {
"profiler_id": profiler_id,
"chunk_id": chunk_id,
"functions": functions,
}
def function_fingerprint(self, function):
# this is a different hashing algorithm than is used by vroom
# but it's not a big deal
hasher = hashlib.md5()
if function.get("package") is not None:
hasher.update(function["package"].encode())
else:
hasher.update(b"")
hasher.update(b":")
hasher.update(function["function"].encode())
return int(hasher.hexdigest()[:8], 16)
def store_span(self, span, is_eap=False):
self.store_spans([span], is_eap=is_eap)
def store_spans(self, spans, is_eap=False):
if is_eap:
files = {}
for i, span in enumerate(spans):
trace_item = span_to_trace_item(span)
files[f"item_{i}"] = trace_item.SerializeToString()
assert (
requests.post(
settings.SENTRY_SNUBA + EAP_ITEMS_INSERT_ENDPOINT,
files=files,
).status_code
== 200
)
else:
assert (
requests.post(
settings.SENTRY_SNUBA + "/tests/entities/spans/insert",
data=json.dumps(spans),
).status_code
== 200
)
@pytest.mark.snuba
@requires_snuba
|
ProfilesSnubaTestCase
|
python
|
PyCQA__pycodestyle
|
pycodestyle.py
|
{
"start": 86617,
"end": 102084
}
|
class ____:
"""Initialize a PEP-8 instance with few options."""
def __init__(self, *args, **kwargs):
# build options from the command line
self.checker_class = kwargs.pop('checker_class', Checker)
parse_argv = kwargs.pop('parse_argv', False)
config_file = kwargs.pop('config_file', False)
parser = kwargs.pop('parser', None)
# build options from dict
options_dict = dict(*args, **kwargs)
arglist = None if parse_argv else options_dict.get('paths', None)
verbose = options_dict.get('verbose', None)
options, self.paths = process_options(
arglist, parse_argv, config_file, parser, verbose)
if options_dict:
options.__dict__.update(options_dict)
if 'paths' in options_dict:
self.paths = options_dict['paths']
self.runner = self.input_file
self.options = options
if not options.reporter:
options.reporter = BaseReport if options.quiet else StandardReport
options.select = tuple(options.select or ())
if not (options.select or options.ignore) and DEFAULT_IGNORE:
# The default choice: ignore controversial checks
options.ignore = tuple(DEFAULT_IGNORE.split(','))
else:
# Ignore all checks which are not explicitly selected
options.ignore = ('',) if options.select else tuple(options.ignore)
options.benchmark_keys = BENCHMARK_KEYS[:]
options.ignore_code = self.ignore_code
options.physical_checks = self.get_checks('physical_line')
options.logical_checks = self.get_checks('logical_line')
options.ast_checks = self.get_checks('tree')
self.init_report()
def init_report(self, reporter=None):
"""Initialize the report instance."""
self.options.report = (reporter or self.options.reporter)(self.options)
return self.options.report
def check_files(self, paths=None):
"""Run all checks on the paths."""
if paths is None:
paths = self.paths
report = self.options.report
runner = self.runner
report.start()
try:
for path in paths:
if os.path.isdir(path):
self.input_dir(path)
elif not self.excluded(path):
runner(path)
except KeyboardInterrupt:
print('... stopped')
report.stop()
return report
def input_file(self, filename, lines=None, expected=None, line_offset=0):
"""Run all checks on a Python source file."""
if self.options.verbose:
print('checking %s' % filename)
fchecker = self.checker_class(
filename, lines=lines, options=self.options)
return fchecker.check_all(expected=expected, line_offset=line_offset)
def input_dir(self, dirname):
"""Check all files in this directory and all subdirectories."""
dirname = dirname.rstrip('/')
if self.excluded(dirname):
return 0
counters = self.options.report.counters
verbose = self.options.verbose
filepatterns = self.options.filename
runner = self.runner
for root, dirs, files in os.walk(dirname):
if verbose:
print('directory ' + root)
counters['directories'] += 1
for subdir in sorted(dirs):
if self.excluded(subdir, root):
dirs.remove(subdir)
for filename in sorted(files):
# contain a pattern that matches?
if (
filename_match(filename, filepatterns) and
not self.excluded(filename, root)
):
runner(os.path.join(root, filename))
def excluded(self, filename, parent=None):
"""Check if the file should be excluded.
Check if 'options.exclude' contains a pattern matching filename.
"""
if not self.options.exclude:
return False
basename = os.path.basename(filename)
if filename_match(basename, self.options.exclude):
return True
if parent:
filename = os.path.join(parent, filename)
filename = os.path.abspath(filename)
return filename_match(filename, self.options.exclude)
def ignore_code(self, code):
"""Check if the error code should be ignored.
If 'options.select' contains a prefix of the error code,
return False. Else, if 'options.ignore' contains a prefix of
the error code, return True.
"""
if len(code) < 4 and any(s.startswith(code)
for s in self.options.select):
return False
return (code.startswith(self.options.ignore) and
not code.startswith(self.options.select))
def get_checks(self, argument_name):
"""Get all the checks for this category.
Find all globally visible functions where the first argument
name starts with argument_name and which contain selected tests.
"""
checks = []
for check, attrs in _checks[argument_name].items():
(codes, args) = attrs
if any(not (code and self.ignore_code(code)) for code in codes):
checks.append((check.__name__, check, args))
return sorted(checks)
def get_parser(prog='pycodestyle', version=__version__):
"""Create the parser for the program."""
parser = OptionParser(prog=prog, version=version,
usage="%prog [options] input ...")
parser.config_options = [
'exclude', 'filename', 'select', 'ignore', 'max-line-length',
'max-doc-length', 'indent-size', 'hang-closing', 'count', 'format',
'quiet', 'show-pep8', 'show-source', 'statistics', 'verbose']
parser.add_option('-v', '--verbose', default=0, action='count',
help="print status messages, or debug with -vv")
parser.add_option('-q', '--quiet', default=0, action='count',
help="report only file names, or nothing with -qq")
parser.add_option('-r', '--repeat', default=True, action='store_true',
help="(obsolete) show all occurrences of the same error")
parser.add_option('--first', action='store_false', dest='repeat',
help="show first occurrence of each error")
parser.add_option('--exclude', metavar='patterns', default=DEFAULT_EXCLUDE,
help="exclude files or directories which match these "
"comma separated patterns (default: %default)")
parser.add_option('--filename', metavar='patterns', default='*.py',
help="when parsing directories, only check filenames "
"matching these comma separated patterns "
"(default: %default)")
parser.add_option('--select', metavar='errors', default='',
help="select errors and warnings (e.g. E,W6)")
parser.add_option('--ignore', metavar='errors', default='',
help="skip errors and warnings (e.g. E4,W) "
"(default: %s)" % DEFAULT_IGNORE)
parser.add_option('--show-source', action='store_true',
help="show source code for each error")
parser.add_option('--show-pep8', action='store_true',
help="show text of PEP 8 for each error "
"(implies --first)")
parser.add_option('--statistics', action='store_true',
help="count errors and warnings")
parser.add_option('--count', action='store_true',
help="print total number of errors and warnings "
"to standard error and set exit code to 1 if "
"total is not null")
parser.add_option('--max-line-length', type='int', metavar='n',
default=MAX_LINE_LENGTH,
help="set maximum allowed line length "
"(default: %default)")
parser.add_option('--max-doc-length', type='int', metavar='n',
default=None,
help="set maximum allowed doc line length and perform "
"these checks (unchecked if not set)")
parser.add_option('--indent-size', type='int', metavar='n',
default=INDENT_SIZE,
help="set how many spaces make up an indent "
"(default: %default)")
parser.add_option('--hang-closing', action='store_true',
help="hang closing bracket instead of matching "
"indentation of opening bracket's line")
parser.add_option('--format', metavar='format', default='default',
help="set the error format [default|pylint|<custom>]")
parser.add_option('--diff', action='store_true',
help="report changes only within line number ranges in "
"the unified diff received on STDIN")
group = parser.add_option_group("Testing Options")
group.add_option('--benchmark', action='store_true',
help="measure processing speed")
return parser
def read_config(options, args, arglist, parser):
"""Read and parse configurations.
If a config file is specified on the command line with the
"--config" option, then only it is used for configuration.
Otherwise, the user configuration (~/.config/pycodestyle) and any
local configurations in the current directory or above will be
merged together (in that order) using the read method of
ConfigParser.
"""
config = configparser.RawConfigParser()
cli_conf = options.config
local_dir = os.curdir
if USER_CONFIG and os.path.isfile(USER_CONFIG):
if options.verbose:
print('user configuration: %s' % USER_CONFIG)
config.read(USER_CONFIG)
parent = tail = args and os.path.abspath(os.path.commonprefix(args))
while tail:
if config.read(os.path.join(parent, fn) for fn in PROJECT_CONFIG):
local_dir = parent
if options.verbose:
print('local configuration: in %s' % parent)
break
(parent, tail) = os.path.split(parent)
if cli_conf and os.path.isfile(cli_conf):
if options.verbose:
print('cli configuration: %s' % cli_conf)
config.read(cli_conf)
pycodestyle_section = None
if config.has_section(parser.prog):
pycodestyle_section = parser.prog
elif config.has_section('pep8'):
pycodestyle_section = 'pep8' # Deprecated
warnings.warn('[pep8] section is deprecated. Use [pycodestyle].')
if pycodestyle_section:
option_list = {o.dest: o.type or o.action for o in parser.option_list}
# First, read the default values
(new_options, __) = parser.parse_args([])
# Second, parse the configuration
for opt in config.options(pycodestyle_section):
if opt.replace('_', '-') not in parser.config_options:
print(" unknown option '%s' ignored" % opt)
continue
if options.verbose > 1:
print(" {} = {}".format(opt,
config.get(pycodestyle_section, opt)))
normalized_opt = opt.replace('-', '_')
opt_type = option_list[normalized_opt]
if opt_type in ('int', 'count'):
value = config.getint(pycodestyle_section, opt)
elif opt_type in ('store_true', 'store_false'):
value = config.getboolean(pycodestyle_section, opt)
else:
value = config.get(pycodestyle_section, opt)
if normalized_opt == 'exclude':
value = normalize_paths(value, local_dir)
setattr(new_options, normalized_opt, value)
# Third, overwrite with the command-line options
(options, __) = parser.parse_args(arglist, values=new_options)
return options
def process_options(arglist=None, parse_argv=False, config_file=None,
parser=None, verbose=None):
"""Process options passed either via arglist or command line args.
Passing in the ``config_file`` parameter allows other tools, such as
flake8 to specify their own options to be processed in pycodestyle.
"""
if not parser:
parser = get_parser()
if not parser.has_option('--config'):
group = parser.add_option_group("Configuration", description=(
"The project options are read from the [%s] section of the "
"tox.ini file or the setup.cfg file located in any parent folder "
"of the path(s) being processed. Allowed options are: %s." %
(parser.prog, ', '.join(parser.config_options))))
group.add_option('--config', metavar='path', default=config_file,
help="user config file location")
# Don't read the command line if the module is used as a library.
if not arglist and not parse_argv:
arglist = []
# If parse_argv is True and arglist is None, arguments are
# parsed from the command line (sys.argv)
(options, args) = parser.parse_args(arglist)
options.reporter = None
# If explicitly specified verbosity, override any `-v` CLI flag
if verbose is not None:
options.verbose = verbose
if parse_argv and not args:
if options.diff or any(os.path.exists(name)
for name in PROJECT_CONFIG):
args = ['.']
else:
parser.error('input not specified')
options = read_config(options, args, arglist, parser)
options.reporter = parse_argv and options.quiet == 1 and FileReport
options.filename = _parse_multi_options(options.filename)
options.exclude = normalize_paths(options.exclude)
options.select = _parse_multi_options(options.select)
options.ignore = _parse_multi_options(options.ignore)
if options.diff:
options.reporter = DiffReport
stdin = stdin_get_value()
options.selected_lines = parse_udiff(stdin, options.filename, args[0])
args = sorted(options.selected_lines)
return options, args
def _parse_multi_options(options, split_token=','):
r"""Split and strip and discard empties.
Turns the following:
A,
B,
into ["A", "B"]
"""
if options:
return [o.strip() for o in options.split(split_token) if o.strip()]
else:
return options
def _main():
"""Parse options and run checks on Python source."""
import signal
# Handle "Broken pipe" gracefully
try:
signal.signal(signal.SIGPIPE, lambda signum, frame: sys.exit(1))
except AttributeError:
pass # not supported on Windows
style_guide = StyleGuide(parse_argv=True)
options = style_guide.options
report = style_guide.check_files()
if options.statistics:
report.print_statistics()
if options.benchmark:
report.print_benchmark()
if report.total_errors:
if options.count:
sys.stderr.write(str(report.total_errors) + '\n')
sys.exit(1)
if __name__ == '__main__':
_main()
|
StyleGuide
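For context on the record above, pycodestyle's documented library entry point is this class: build a StyleGuide with option overrides, run check_files, and read the report. The file name below is a placeholder.

import pycodestyle

style_guide = pycodestyle.StyleGuide(quiet=True, max_line_length=100)
report = style_guide.check_files(["my_module.py"])   # hypothetical path
print("violations found:", report.total_errors)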
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/operators/test_bedrock.py
|
{
"start": 8884,
"end": 14124
}
|
class ____:
KNOWLEDGE_BASE_ID = "knowledge_base_id"
@pytest.fixture
def mock_conn(self) -> Generator[BaseAwsConnection, None, None]:
with mock.patch.object(BedrockAgentHook, "conn") as _conn:
_conn.create_knowledge_base.return_value = {
"knowledgeBase": {"knowledgeBaseId": self.KNOWLEDGE_BASE_ID}
}
yield _conn
@pytest.fixture
def bedrock_hook(self) -> Generator[BedrockAgentHook, None, None]:
with mock_aws():
hook = BedrockAgentHook()
yield hook
def setup_method(self):
self.operator = BedrockCreateKnowledgeBaseOperator(
task_id="create_knowledge_base",
name=self.KNOWLEDGE_BASE_ID,
embedding_model_arn="arn:aws:bedrock:us-east-1::foundation-model/amazon.titan-embed-text-v1",
role_arn="role-arn",
storage_config={
"type": "OPENSEARCH_SERVERLESS",
"opensearchServerlessConfiguration": {
"collectionArn": "collection_arn",
"vectorIndexName": "index_name",
"fieldMapping": {
"vectorField": "vector",
"textField": "text",
"metadataField": "text-metadata",
},
},
},
)
self.operator.defer = mock.MagicMock()
@pytest.mark.parametrize(
("wait_for_completion", "deferrable"),
[
pytest.param(False, False, id="no_wait"),
pytest.param(True, False, id="wait"),
pytest.param(False, True, id="defer"),
],
)
@mock.patch.object(BedrockAgentHook, "get_waiter")
def test_create_knowledge_base_wait_combinations(
self, _, wait_for_completion, deferrable, mock_conn, bedrock_hook
):
self.operator.wait_for_completion = wait_for_completion
self.operator.deferrable = deferrable
response = self.operator.execute({})
assert response == self.KNOWLEDGE_BASE_ID
assert bedrock_hook.get_waiter.call_count == wait_for_completion
assert self.operator.defer.call_count == deferrable
def test_returns_id(self, mock_conn):
self.operator.wait_for_completion = False
result = self.operator.execute({})
assert result == self.KNOWLEDGE_BASE_ID
def test_template_fields(self):
validate_template_fields(self.operator)
def _create_validation_error(self, message: str) -> ClientError:
"""Helper to create ValidationException with specific message."""
return ClientError(
error_response={"Error": {"Message": message, "Code": "ValidationException"}},
operation_name="CreateKnowledgeBase",
)
@pytest.mark.parametrize(
("error_message", "should_retry"),
[
("no such index [bedrock-kb-index]", True),
("server returned 401", True),
("user does not have permissions", True),
("status code: 403", True),
("Bad Authorization", True),
("Some other validation error", False),
],
)
def test_retry_condition_validation(self, error_message, should_retry, mock_conn):
"""Test which error messages trigger retries."""
self.operator.wait_for_completion = False
validation_error = self._create_validation_error(error_message)
mock_conn.create_knowledge_base.side_effect = [validation_error]
if should_retry:
# For retryable errors, provide a success response for the retry
success_response = {"knowledgeBase": {"knowledgeBaseId": self.KNOWLEDGE_BASE_ID}}
mock_conn.create_knowledge_base.side_effect = [validation_error, success_response]
with mock.patch("airflow.providers.amazon.aws.operators.bedrock.sleep"):
result = self.operator.execute({})
assert result == self.KNOWLEDGE_BASE_ID
assert mock_conn.create_knowledge_base.call_count == 2
else:
# For non-retryable errors, the original error should be raised immediately
with pytest.raises(ClientError):
self.operator.execute({})
assert mock_conn.create_knowledge_base.call_count == 1
@mock.patch("airflow.providers.amazon.aws.operators.bedrock.sleep")
def test_retry_exhaustion_raises_original_error(self, mock_sleep, mock_conn):
"""Test that original error is raised when retries are exhausted."""
error_403 = self._create_validation_error(
"Dependency error document status code: 403, error message: Bad Authorization"
)
# Default number of waiter attempts is 20
mock_conn.create_knowledge_base.side_effect = [error_403] * 21
with pytest.raises(ClientError) as exc_info:
self.operator.execute({})
assert exc_info.value.response["Error"]["Code"] == "ValidationException"
assert "status code: 403" in exc_info.value.response["Error"]["Message"]
assert mock_conn.create_knowledge_base.call_count == 21
assert mock_sleep.call_count == 20
|
TestBedrockCreateKnowledgeBaseOperator
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_button02.py
|
{
"start": 315,
"end": 877
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("button02.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_button(
"B4", {"x_offset": 4, "y_offset": 3, "caption": "my text"}
)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
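Outside the comparison harness above, the same XlsxWriter call can be run standalone; insert_button() is the public worksheet API the test exercises, and the output filename here is just an example.

import xlsxwriter

workbook = xlsxwriter.Workbook("buttons.xlsx")   # example output path
worksheet = workbook.add_worksheet()
worksheet.insert_button("B4", {"x_offset": 4, "y_offset": 3, "caption": "my text"})
workbook.close()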
|