| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6-201) | class_span (dict) | source (stringlengths, 21-2.38M) | target (stringlengths, 1-96) |
|---|---|---|---|---|---|
python
|
pypa__pipenv
|
pipenv/patched/pip/_internal/resolution/resolvelib/base.py
|
{
"start": 2359,
"end": 3572
}
|
class ____:
@property
def project_name(self) -> NormalizedName:
"""The "project name" of a requirement.
This is different from ``name`` if this requirement contains extras,
in which case ``name`` would contain the ``[...]`` part, while this
refers to the name of the project.
"""
raise NotImplementedError("Subclass should override")
@property
def name(self) -> str:
"""The name identifying this requirement in the resolver.
This is different from ``project_name`` if this requirement contains
extras, where ``project_name`` would not contain the ``[...]`` part.
"""
raise NotImplementedError("Subclass should override")
def is_satisfied_by(self, candidate: "Candidate") -> bool:
return False
def get_candidate_lookup(self) -> CandidateLookup:
raise NotImplementedError("Subclass should override")
def format_for_error(self) -> str:
raise NotImplementedError("Subclass should override")
def _match_link(link: Link, candidate: "Candidate") -> bool:
if candidate.source_link:
return links_equivalent(link, candidate.source_link)
return False
|
Requirement
|
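The masked class is the abstract requirement interface from pip's resolvelib-based resolver. A minimal sketch of the subclassing pattern it enforces; the `Simple*` names below are illustrative, not part of pip:

```python
# Sketch of the abstract-base pattern above, under assumed simplified types.
class SimpleCandidate:
    def __init__(self, name: str, version: str) -> None:
        self.name = name
        self.version = version


class SimpleRequirement:
    """Concrete requirement: satisfied by any candidate with the same name."""

    def __init__(self, name: str) -> None:
        self._name = name

    @property
    def project_name(self) -> str:
        return self._name

    @property
    def name(self) -> str:
        return self._name

    def is_satisfied_by(self, candidate: SimpleCandidate) -> bool:
        # Overrides the base class default of `return False`.
        return candidate.name == self._name

    def format_for_error(self) -> str:
        return self._name
```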
python
|
scikit-learn__scikit-learn
|
sklearn/metrics/_pairwise_distances_reduction/_dispatcher.py
|
{
"start": 23217,
"end": 30032
}
|
class ____(BaseDistancesReductionDispatcher):
"""Compute radius-based class modes of row vectors of X using the
those of Y.
For each row-vector X[i] of the queries X, find all the indices j of
row-vectors in Y such that:
dist(X[i], Y[j]) <= radius
RadiusNeighborsClassMode is typically used to perform brute-force
radius neighbors queries when the weighted mode of the labels for
the nearest neighbors within the specified radius is required,
such as in `predict` methods.
This class is not meant to be instantiated; one should only use
its :meth:`compute` classmethod which handles allocation and
deallocation consistently.
"""
@classmethod
def valid_metrics(cls) -> List[str]:
excluded = {
# Euclidean is technically usable for RadiusNeighborsClassMode
# but it would not be competitive.
# TODO: implement Euclidean specialization using GEMM.
"euclidean",
"sqeuclidean",
}
return sorted(set(BaseDistancesReductionDispatcher.valid_metrics()) - excluded)
@classmethod
def compute(
cls,
X,
Y,
radius,
weights,
Y_labels,
unique_Y_labels,
outlier_label,
metric="euclidean",
chunk_size=None,
metric_kwargs=None,
strategy=None,
):
"""Return the results of the reduction for the given arguments.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
The input array to be labelled.
Y : ndarray of shape (n_samples_Y, n_features)
The input array whose class membership is provided through
the `Y_labels` parameter.
radius : float
The radius defining the neighborhood.
weights : ndarray
The weights applied to the `Y_labels` when computing the
weighted mode of the labels.
Y_labels : ndarray
An array containing the index of the class membership of the
associated samples in `Y`. This is used in labeling `X`.
unique_Y_labels : ndarray
An array containing all unique class labels.
outlier_label : int, default=None
Label for outlier samples (samples with no neighbors in the given
radius). In the default case, when the value is None, a ValueError
is raised if any outlier is detected. The outlier label should be
selected from among the unique 'Y' labels. If it is specified with
a different value, a warning is raised and all class probabilities
of outliers are assigned 0.
metric : str, default='euclidean'
The distance metric to use. For a list of available metrics, see
the documentation of :class:`~sklearn.metrics.DistanceMetric`.
Currently does not support `'precomputed'`.
chunk_size : int, default=None
The number of vectors per chunk. If None (default), looks up the
scikit-learn configuration for `pairwise_dist_chunk_size`,
and uses 256 if it is not set.
metric_kwargs : dict, default=None
Keyword arguments to pass to specified metric function.
strategy : str, {'auto', 'parallel_on_X', 'parallel_on_Y'}, default=None
The chunking strategy defining which dataset the parallelization is
made on. For both strategies, the computation happens with two nested
loops, respectively on chunks of X and chunks of Y.
Strategies differ on which loop (outer or inner) is made to run
in parallel with the Cython `prange` construct:
- 'parallel_on_X' dispatches chunks of X uniformly on threads.
Each thread then iterates on all the chunks of Y. This strategy is
embarrassingly parallel and comes with no data-structure
synchronisation.
- 'parallel_on_Y' dispatches chunks of Y uniformly on threads.
Each thread processes all the chunks of X in turn. This strategy is
a sequence of embarrassingly parallel subtasks (the inner loop on Y
chunks) with intermediate data-structure synchronisation at each
iteration of the sequential outer loop on X chunks.
- 'auto' relies on a simple heuristic to choose between
'parallel_on_X' and 'parallel_on_Y': when `X.shape[0]` is large enough,
'parallel_on_X' is usually the most efficient strategy.
When `X.shape[0]` is small but `Y.shape[0]` is large, 'parallel_on_Y'
brings more opportunity for parallelism and is therefore more efficient
despite the synchronization step at each iteration of the outer loop
on chunks of `X`.
- None (default) looks up the scikit-learn configuration for
`pairwise_dist_parallel_strategy`, and uses 'auto' if it is not set.
Returns
-------
probabilities : ndarray of shape (n_samples_X, n_classes)
An array containing the class probabilities for each sample.
"""
if weights not in {"uniform", "distance"}:
raise ValueError(
"Only the 'uniform' or 'distance' weights options are supported"
f" at this time. Got: {weights=}."
)
if X.dtype == Y.dtype == np.float64:
return RadiusNeighborsClassMode64.compute(
X=X,
Y=Y,
radius=radius,
weights=weights,
Y_labels=np.array(Y_labels, dtype=np.intp),
unique_Y_labels=np.array(unique_Y_labels, dtype=np.intp),
outlier_label=outlier_label,
metric=metric,
chunk_size=chunk_size,
metric_kwargs=metric_kwargs,
strategy=strategy,
)
if X.dtype == Y.dtype == np.float32:
return RadiusNeighborsClassMode32.compute(
X=X,
Y=Y,
radius=radius,
weights=weights,
Y_labels=np.array(Y_labels, dtype=np.intp),
unique_Y_labels=np.array(unique_Y_labels, dtype=np.intp),
outlier_label=outlier_label,
metric=metric,
chunk_size=chunk_size,
metric_kwargs=metric_kwargs,
strategy=strategy,
)
raise ValueError(
"Only float64 or float32 datasets pairs are supported at this time, "
f"got: X.dtype={X.dtype} and Y.dtype={Y.dtype}."
)
|
RadiusNeighborsClassMode
|
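This dispatcher is internal; the public code path that reaches it is `RadiusNeighborsClassifier.predict`. A small usage sketch against the public API (exact backend selection depends on the scikit-learn build):

```python
import numpy as np
from sklearn.neighbors import RadiusNeighborsClassifier

X = np.array([[0.0], [0.5], [3.0]])
y = np.array([0, 0, 1])

# Brute-force radius queries in predict() are where the dispatcher
# above is used internally when it is selected.
clf = RadiusNeighborsClassifier(radius=1.0, algorithm="brute").fit(X, y)
print(clf.predict(np.array([[0.2], [2.9]])))  # -> [0 1]
```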
python
|
agronholm__apscheduler
|
src/apscheduler/_events.py
|
{
"start": 999,
"end": 1134
}
|
class ____(Event):
"""Base class for events originating from a data store."""
@attrs.define(kw_only=True, frozen=True)
|
DataStoreEvent
|
python
|
prompt-toolkit__python-prompt-toolkit
|
src/prompt_toolkit/cursor_shapes.py
|
{
"start": 497,
"end": 1340
}
|
class ____(Enum):
# Default value that should tell the output implementation to never send
# cursor shape escape sequences. This is the default right now, because
# before this `CursorShape` functionality was introduced into
# prompt_toolkit itself, people had workarounds to send cursor shapes
# escapes into the terminal, by monkey patching some of prompt_toolkit's
# internals. We don't want the default prompt_toolkit implementation to
# interfere with that. E.g., IPython patches the `ViState.input_mode`
# property. See: https://github.com/ipython/ipython/pull/13501/files
_NEVER_CHANGE = "_NEVER_CHANGE"
BLOCK = "BLOCK"
BEAM = "BEAM"
UNDERLINE = "UNDERLINE"
BLINKING_BLOCK = "BLINKING_BLOCK"
BLINKING_BEAM = "BLINKING_BEAM"
BLINKING_UNDERLINE = "BLINKING_UNDERLINE"
|
CursorShape
|
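A usage sketch, assuming a prompt_toolkit version in which `prompt()` accepts a `cursor=` argument (introduced alongside this enum):

```python
from prompt_toolkit import prompt
from prompt_toolkit.cursor_shapes import CursorShape

# Ask the terminal for a blinking beam cursor while the prompt is active;
# the default CursorShape._NEVER_CHANGE sends no escape sequences at all.
answer = prompt("> ", cursor=CursorShape.BLINKING_BEAM)
```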
python
|
Netflix__metaflow
|
metaflow/_vendor/click/exceptions.py
|
{
"start": 7846,
"end": 8118
}
|
class ____(RuntimeError):
"""An exception that indicates that the application should exit with some
status code.
:param code: the status code to exit with.
"""
__slots__ = ("exit_code",)
def __init__(self, code=0):
self.exit_code = code
|
Exit
|
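Application code normally reaches this exception through `Context.exit()` rather than raising it directly; a short sketch with click's public API:

```python
import click

@click.command()
@click.option("--fail", is_flag=True)
@click.pass_context
def cli(ctx, fail):
    if fail:
        # ctx.exit(code) raises the Exit exception shown above, which
        # click's entry point catches and turns into sys.exit(code).
        ctx.exit(2)
    click.echo("ok")

if __name__ == "__main__":
    cli()
```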
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/errors.py
|
{
"start": 13538,
"end": 13932
}
|
class ____(DagsterUserCodeExecutionError):
"""Indicates an error occurred while handling an output for a step."""
def __init__(self, *args, **kwargs):
self.step_key = check.str_param(kwargs.pop("step_key"), "step_key")
self.output_name = check.str_param(kwargs.pop("output_name"), "output_name")
super().__init__(*args, **kwargs)
|
DagsterExecutionHandleOutputError
|
python
|
pyca__cryptography
|
src/cryptography/hazmat/primitives/serialization/ssh.py
|
{
"start": 27447,
"end": 27512
}
|
class ____(enum.Enum):
USER = 1
HOST = 2
|
SSHCertificateType
|
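The enum distinguishes user from host certificates when building OpenSSH certificates. A hedged sketch using `SSHCertificateBuilder` from the same `ssh` module, per recent cryptography releases (check your installed version):

```python
import datetime
from cryptography.hazmat.primitives.asymmetric import ed25519
from cryptography.hazmat.primitives.serialization.ssh import (
    SSHCertificateBuilder,
    SSHCertificateType,
)

ca_key = ed25519.Ed25519PrivateKey.generate()
user_key = ed25519.Ed25519PrivateKey.generate()
now = datetime.datetime.now(datetime.timezone.utc)

cert = (
    SSHCertificateBuilder()
    .public_key(user_key.public_key())
    .serial(1)
    .type(SSHCertificateType.USER)  # USER = 1; HOST = 2 for host certs
    .key_id(b"alice@example.com")
    .valid_principals([b"alice"])
    .valid_after(int(now.timestamp()))
    .valid_before(int((now + datetime.timedelta(hours=1)).timestamp()))
    .sign(ca_key)
)
print(cert.public_bytes())
```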
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/dialects/mssql/pymssql.py
|
{
"start": 1075,
"end": 1371
}
|
class ____(MSIdentifierPreparer):
def __init__(self, dialect):
super().__init__(dialect)
# pymssql has the very unusual behavior that it uses pyformat
# yet does not require that percent signs be doubled
self._double_percents = False
|
MSIdentifierPreparer_pymssql
|
python
|
allegroai__clearml
|
clearml/backend_interface/task/repo/detectors.py
|
{
"start": 13553,
"end": 13674
}
|
class ____(EnvDetector):
def __init__(self) -> None:
super(GitEnvDetector, self).__init__("git")
|
GitEnvDetector
|
python
|
huggingface__transformers
|
tests/models/musicgen_melody/test_modeling_musicgen_melody.py
|
{
"start": 53169,
"end": 56092
}
|
class ____(unittest.TestCase):
@cached_property
def model(self):
return MusicgenMelodyForConditionalGeneration.from_pretrained("ylacombe/musicgen-stereo-melody").to(
torch_device
)
@cached_property
def processor(self):
return MusicgenMelodyProcessor.from_pretrained("ylacombe/musicgen-stereo-melody")
@slow
def test_generate_unconditional_greedy(self):
model = self.model
# only generate 1 sample with greedy - since it's deterministic all elements of the batch will be the same
unconditional_inputs = self.processor.get_unconditional_inputs(num_samples=1).to(torch_device)
output_values = model.generate(**unconditional_inputs, do_sample=False, max_new_tokens=12, guidance_scale=1.0)
# fmt: off
EXPECTED_VALUES_LEFT = torch.tensor(
[
1.2742e-04, -8.0480e-05, 5.5788e-04, 1.0401e-03, 2.6547e-04,
1.5587e-05, -1.4211e-04, -9.7308e-05, 6.4503e-04, 5.0903e-04,
9.6475e-04, 1.0499e-03, 3.7205e-05, -5.3652e-04, -3.6579e-04, 2.5679e-04
]
)
# fmt: on
# (bsz, channels, seq_len)
self.assertTrue(output_values.shape == (1, 2, 5760))
torch.testing.assert_close(output_values[0, 0, :16].cpu(), EXPECTED_VALUES_LEFT, rtol=6e-4, atol=6e-4)
torch.testing.assert_close(output_values[0, 1, :16].cpu(), EXPECTED_VALUES_LEFT, rtol=6e-4, atol=6e-4)
@slow
def test_generate_text_audio_prompt(self):
model = self.model
processor = self.processor
audio = [get_bip_bip(duration=0.5), get_bip_bip(duration=1.0)]
text = ["80s music", "Club techno"]
inputs = processor(audio=audio, text=text, padding=True, return_tensors="pt").to(torch_device)
output_values = model.generate(**inputs, do_sample=False, guidance_scale=3.0, max_new_tokens=12)
# fmt: off
EXPECTED_VALUES_LEFT_FIRST_SAMPLE = torch.tensor(
[
-0.0862, -0.1021, -0.0936, -0.0754, -0.0616, -0.0456, -0.0354, -0.0298,
-0.0036, 0.0222, 0.0523, 0.0660, 0.0496, 0.0356, 0.0457, 0.0769
]
)
EXPECTED_VALUES_RIGHT_SECOND_SAMPLE = torch.tensor(
[
-0.0327, -0.0450, -0.0264, -0.0278, -0.0365, -0.0272, -0.0401, -0.0574,
-0.0413, -0.0508, -0.0269, -0.0323, -0.0762, -0.1115, -0.1390, -0.0790
]
)
# fmt: on
# (bsz, channels, seq_len)
self.assertTrue(output_values.shape == (2, 2, 5760))
torch.testing.assert_close(
output_values[0, 0, :16].cpu(), EXPECTED_VALUES_LEFT_FIRST_SAMPLE, rtol=1e-4, atol=1e-4
)
torch.testing.assert_close(
output_values[1, 1, :16].cpu(), EXPECTED_VALUES_RIGHT_SECOND_SAMPLE, rtol=1e-4, atol=1e-4
)
|
MusicgenMelodyStereoIntegrationTests
|
python
|
huggingface__transformers
|
src/transformers/models/glm4_moe/modular_glm4_moe.py
|
{
"start": 10454,
"end": 11838
}
|
class ____(CohereAttention):
def __init__(self, config: Glm4MoeConfig, layer_idx: Optional[int] = None):
nn.Module.__init__(self)
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_parameters = config.rope_parameters
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
self.use_qk_norm = config.use_qk_norm
if self.use_qk_norm:
self.q_norm = Glm4MoeRMSNorm(self.head_dim, eps=config.rms_norm_eps)
self.k_norm = Glm4MoeRMSNorm(self.head_dim, eps=config.rms_norm_eps)
|
Glm4MoeAttention
|
python
|
PrefectHQ__prefect
|
tests/test_tasks.py
|
{
"start": 25679,
"end": 26881
}
|
class ____:
@pytest.mark.parametrize("error", [ValueError("Hello"), None])
async def test_final_state_reflects_exceptions_during_run(self, error):
@task
def bar():
if error:
raise error
@flow(version="test")
def foo():
return quote(bar(return_state=True))
task_state = foo().unquote()
# Assert the final state is correct
assert task_state.is_failed() if error else task_state.is_completed()
assert exceptions_equal(await task_state.result(raise_on_failure=False), error)
async def test_final_task_state_respects_returned_state(self):
@task
def bar():
return State(
type=StateType.FAILED,
message="Test returned state",
data=True,
)
@flow(version="test")
def foo():
return quote(bar(return_state=True))
task_state = foo().unquote()
# Assert the final state is correct
assert task_state.is_failed()
assert await task_state.result(raise_on_failure=False) is True
assert task_state.message == "Test returned state"
|
TestTaskStates
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/vml/test_write_textbox.py
|
{
"start": 289,
"end": 831
}
|
class ____(unittest.TestCase):
"""
Test the Vml _write_textbox() method.
"""
def setUp(self):
self.fh = StringIO()
self.vml = Vml()
self.vml._set_filehandle(self.fh)
def test_write_comment_textbox(self):
"""Test the _write_comment_textbox() method"""
self.vml._write_comment_textbox()
exp = """<v:textbox style="mso-direction-alt:auto"><div style="text-align:left"></div></v:textbox>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
|
TestWriteVtextbox
|
python
|
django__django
|
django/template/context.py
|
{
"start": 598,
"end": 3763
}
|
class ____:
def __init__(self, dict_=None):
self._reset_dicts(dict_)
def _reset_dicts(self, value=None):
builtins = {"True": True, "False": False, "None": None}
self.dicts = [builtins]
if isinstance(value, BaseContext):
self.dicts += value.dicts[1:]
elif value is not None:
self.dicts.append(value)
def __copy__(self):
duplicate = BaseContext()
duplicate.__class__ = self.__class__
duplicate.__dict__ = copy(self.__dict__)
duplicate.dicts = self.dicts[:]
return duplicate
def __repr__(self):
return repr(self.dicts)
def __iter__(self):
return reversed(self.dicts)
def push(self, *args, **kwargs):
dicts = []
for d in args:
if isinstance(d, BaseContext):
dicts += d.dicts[1:]
else:
dicts.append(d)
return ContextDict(self, *dicts, **kwargs)
def pop(self):
if len(self.dicts) == 1:
raise ContextPopException
return self.dicts.pop()
def __setitem__(self, key, value):
"Set a variable in the current context"
self.dicts[-1][key] = value
def set_upward(self, key, value):
"""
Set a variable in one of the higher contexts if it exists there,
otherwise in the current context.
"""
context = self.dicts[-1]
for d in reversed(self.dicts):
if key in d:
context = d
break
context[key] = value
def __getitem__(self, key):
"""
Get a variable's value, starting at the current context and going
upward
"""
for d in reversed(self.dicts):
if key in d:
return d[key]
raise KeyError(key)
def __delitem__(self, key):
"Delete a variable from the current context"
del self.dicts[-1][key]
def __contains__(self, key):
return any(key in d for d in self.dicts)
def get(self, key, otherwise=None):
for d in reversed(self.dicts):
if key in d:
return d[key]
return otherwise
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
def new(self, values=None):
"""
Return a new context with the same properties, but with only the
values given in 'values' stored.
"""
new_context = copy(self)
new_context._reset_dicts(values)
return new_context
def flatten(self):
"""
Return self.dicts as one dictionary.
"""
flat = {}
for d in self.dicts:
flat.update(d)
return flat
def __eq__(self, other):
"""
Compare two contexts by comparing theirs 'dicts' attributes.
"""
if not isinstance(other, BaseContext):
return NotImplemented
# flatten dictionaries because they can be put in a different order.
return self.flatten() == other.flatten()
|
BaseContext
|
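The dict stack above is what gives template contexts their scoping: lookups walk the stack from top to bottom, and `push()` returns a `ContextDict` that also works as a context manager. A quick demonstration through the public `Context` subclass:

```python
from django.template import Context

c = Context({"user": "alice"})
assert c["user"] == "alice"
assert c["True"] is True        # builtins sit at the bottom of the stack

with c.push(user="bob"):        # push() returns a ContextDict context manager
    assert c["user"] == "bob"   # the topmost dict wins on lookup
assert c["user"] == "alice"     # automatically popped on exit
```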
python
|
PrefectHQ__prefect
|
src/prefect/server/services/scheduler.py
|
{
"start": 1088,
"end": 1199
}
|
class ____(Exception):
"""Internal control-flow exception used to retry the Scheduler's main loop"""
|
TryAgain
|
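A minimal, self-contained sketch of the control-flow pattern such a sentinel exception enables (the loop here is illustrative, not Prefect's actual scheduler):

```python
import time

class TryAgain(Exception):
    """Internal control-flow signal: restart the main loop immediately."""

queue = [4, 3, 2, 1, 0]

def run_once() -> None:
    # Illustrative rule: while plenty of work remains, ask the loop to
    # re-run immediately instead of sleeping until the next interval.
    item = queue.pop(0)
    if item > 2:
        raise TryAgain

while queue:
    try:
        run_once()
    except TryAgain:
        continue          # re-enter the loop with no delay
    time.sleep(0.01)      # normal pacing between runs
```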
python
|
getsentry__sentry
|
src/sentry/integrations/discord/message_builder/base/component/action_row.py
|
{
"start": 176,
"end": 330
}
|
class ____(TypedDict):
type: int
components: list[DiscordMessageComponentDict] # Components can be buttons, select menus, etc.
|
DiscordActionRowDict
|
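`TypedDict` adds no runtime behavior; conforming values are plain dicts. A short sketch in which the button fields follow Discord's component schema and are not defined in this module:

```python
from typing import TypedDict

class DiscordMessageComponentDict(TypedDict, total=False):
    # Illustrative stand-in for the real component dict in this package.
    type: int
    style: int
    label: str
    custom_id: str

class DiscordActionRowDict(TypedDict):
    type: int
    components: list[DiscordMessageComponentDict]

# At runtime these are plain dicts; type 1 is Discord's action-row
# component type and type 2 a button.
button: DiscordMessageComponentDict = {
    "type": 2, "style": 1, "label": "Approve", "custom_id": "approve"
}
row: DiscordActionRowDict = {"type": 1, "components": [button]}
```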
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_custom_resource_definition.py
|
{
"start": 383,
"end": 7770
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1CustomResourceDefinitionSpec',
'status': 'V1CustomResourceDefinitionStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
"""V1CustomResourceDefinition - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""Gets the api_version of this V1CustomResourceDefinition. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1CustomResourceDefinition. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1CustomResourceDefinition.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1CustomResourceDefinition. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1CustomResourceDefinition. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1CustomResourceDefinition. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1CustomResourceDefinition.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1CustomResourceDefinition. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1CustomResourceDefinition. # noqa: E501
:return: The metadata of this V1CustomResourceDefinition. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1CustomResourceDefinition.
:param metadata: The metadata of this V1CustomResourceDefinition. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1CustomResourceDefinition. # noqa: E501
:return: The spec of this V1CustomResourceDefinition. # noqa: E501
:rtype: V1CustomResourceDefinitionSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1CustomResourceDefinition.
:param spec: The spec of this V1CustomResourceDefinition. # noqa: E501
:type: V1CustomResourceDefinitionSpec
"""
if self.local_vars_configuration.client_side_validation and spec is None: # noqa: E501
raise ValueError("Invalid value for `spec`, must not be `None`") # noqa: E501
self._spec = spec
@property
def status(self):
"""Gets the status of this V1CustomResourceDefinition. # noqa: E501
:return: The status of this V1CustomResourceDefinition. # noqa: E501
:rtype: V1CustomResourceDefinitionStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1CustomResourceDefinition.
:param status: The status of this V1CustomResourceDefinition. # noqa: E501
:type: V1CustomResourceDefinitionStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1CustomResourceDefinition):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1CustomResourceDefinition):
return True
return self.to_dict() != other.to_dict()
|
V1CustomResourceDefinition
|
python
|
coleifer__peewee
|
examples/hexastore.py
|
{
"start": 3104,
"end": 5022
}
|
class ____(object):
__slots__ = ('name',)
def __init__(self, name):
self.name = name
def __hash__(self):
return hash(self.name)
def __repr__(self):
return '<Variable: %s>' % self.name
if __name__ == '__main__':
h = Hexastore()
data = (
('charlie', 'likes', 'beanie'),
('charlie', 'likes', 'huey'),
('charlie', 'likes', 'mickey'),
('charlie', 'likes', 'scout'),
('charlie', 'likes', 'zaizee'),
('huey', 'likes', 'charlie'),
('huey', 'likes', 'scout'),
('huey', 'likes', 'zaizee'),
('mickey', 'likes', 'beanie'),
('mickey', 'likes', 'charlie'),
('mickey', 'likes', 'scout'),
('zaizee', 'likes', 'beanie'),
('zaizee', 'likes', 'charlie'),
('zaizee', 'likes', 'scout'),
('charlie', 'lives', 'topeka'),
('beanie', 'lives', 'heaven'),
('huey', 'lives', 'topeka'),
('mickey', 'lives', 'topeka'),
('scout', 'lives', 'heaven'),
('zaizee', 'lives', 'lawrence'),
)
h.store_many(data)
print('added %s items to store' % len(data))
print('\nwho lives in topeka?')
for obj in h.query(p='lives', o='topeka'):
print(obj.subj)
print('\nmy friends in heaven?')
X = h.v.x
results = h.search(('charlie', 'likes', X),
(X, 'lives', 'heaven'))
for result in results:
print(result['x'])
print('\nmutual friends?')
X = h.v.x
Y = h.v.y
results = h.search((X, 'likes', Y), (Y, 'likes', X))
for result in results:
print(result['x'], ' <-> ', result['y'])
print('\nliked by both charlie, huey and mickey?')
X = h.v.x
results = h.search(('charlie', 'likes', X),
('huey', 'likes', X),
('mickey', 'likes', X))
for result in results:
print(result['x'])
|
Variable
|
python
|
ray-project__ray
|
python/ray/_private/thirdparty/pynvml/pynvml.py
|
{
"start": 93290,
"end": 93603
}
|
class ____(_PrintableStructure):
_fields_ = [
('version', c_uint),
('engineId', c_uint),
('schedulerPolicy', c_uint),
('enableARRMode', c_uint),
('schedulerParams', c_nvmlVgpuSchedulerSetParams_t),
]
nvmlVgpuSchedulerState_v1 = 0x1000018
|
c_nvmlVgpuSchedulerState_v1_t
|
python
|
Textualize__textual
|
tests/snapshot_tests/snapshot_apps/option_list_long.py
|
{
"start": 131,
"end": 346
}
|
class ____(App[None]):
def compose(self) -> ComposeResult:
yield OptionList(*[Option(f"This is option #{n}") for n in range(100)])
if __name__ == "__main__":
LongOptionListApp().run()
|
LongOptionListApp
|
python
|
google__jax
|
tests/pallas/ops_test.py
|
{
"start": 91532,
"end": 94675
}
|
class ____(PallasBaseTest):
@parameterized.parameters(*[
(lambda: (pl.dslice(0, 4), slice(None), slice(None)), "<- a[:,:,:]"),
(lambda: (pl.dslice(0, 3), slice(None), slice(None)), "<- a[:3,:,:]"),
(lambda: (pl.dslice(1, 3), slice(None), pl.dslice(0, 4)), "<- a[1:,:,:4]"),
(lambda: (jnp.arange(5), slice(None), pl.dslice(0, 4)), "<- a[b,:,:4]"),
(lambda: (jnp.arange(5)[:, None], jnp.arange(3)[None], pl.ds(4)), "<- a[f,g,:4]"),
])
def test_load_pretty_print(self, expr, expected):
def body(x_ref):
x = pallas_primitives.load(x_ref, expr())
return [x]
jaxpr, _ , _ = pe.trace_to_jaxpr_dynamic(
wrap_init(body, 1), [state.shaped_array_ref((4, 3, 2), jnp.int32)])
self.assertIn(expected, jaxpr.pretty_print(use_color=False))
@parameterized.parameters(*[
(lambda: (pl.dslice(0, 4), slice(None), slice(None)), "a[:,:,:] <-"),
(lambda: (pl.dslice(0, 3), slice(None), slice(None)), "a[:3,:,:] <-"),
(lambda: (pl.dslice(1, 3), slice(None), pl.dslice(0, 4)), "a[1:,:,:4] <-"),
(lambda: (jnp.arange(5), slice(None), pl.dslice(0, 4)), "a[b,:,:4] <-"),
(lambda: (jnp.arange(5)[:, None], jnp.arange(3)[None], pl.dslice(4)), "a[m,n,:4] <-"),
])
def test_store_pretty_print(self, expr, expected):
def body(x_ref):
pallas_primitives.store(
x_ref, expr(), pallas_primitives.load(x_ref, expr())
)
return []
jaxpr, _ , _ = pe.trace_to_jaxpr_dynamic(
wrap_init(body, 1), [state.shaped_array_ref((4, 3, 2), jnp.int32)])
self.assertIn(expected, jaxpr.pretty_print(use_color=False))
@parameterized.parameters(*[
(lambda: (pl.dslice(0, 4), slice(None), slice(None)),
"c:i32[4,3,2], a[:,:,:] <-"),
(lambda: (pl.dslice(0, 3), slice(None), slice(None)),
"c:i32[3,3,2], a[:3,:,:] <-"),
(lambda: (pl.dslice(1, 3), slice(None), pl.dslice(0, 4)),
"c:i32[3,3,4], a[1:,:,:4] <-"),
(lambda: (jnp.arange(5), slice(None), pl.dslice(0, 4)),
"e:i32[5,3,4], a[b,:,:4] <-"),
(lambda: (jnp.arange(5)[:, None], jnp.arange(3)[None], pl.dslice(4)),
"o:i32[5,3,4], a[m,n,:4] <-"),
])
def test_swap_pretty_print(self, expr, expected):
def body(x_ref):
x = pallas_primitives.swap(
x_ref, expr(), pallas_primitives.load(x_ref, expr())
)
return [x]
jaxpr, _ , _ = pe.trace_to_jaxpr_dynamic(
wrap_init(body, 1), [state.shaped_array_ref((4, 3, 2), jnp.int32)])
self.assertIn(expected, jaxpr.pretty_print(use_color=False))
@parameterized.product(approx=[False, True])
def test_reciprocal(self, approx):
if not jtu.test_device_matches(["tpu"]):
self.skipTest("Not implemented on non-TPU devices")
shape = (32, 256)
x = jnp.arange(np.prod(shape), dtype=jnp.float32).reshape(shape)
def kernel(x_ref, o_ref):
o_ref[...] = pl.reciprocal(x_ref[...], approx=approx)
out = self.pallas_call(
kernel, out_shape=jax.ShapeDtypeStruct(shape, jnp.float32)
)(x)
kwargs = {}
if approx:
kwargs.update(dict(atol=2e-5, rtol=2e-5))
np.testing.assert_allclose(out, jax.lax.reciprocal(x), **kwargs)
|
PallasPrimitivesTest
|
python
|
apache__airflow
|
providers/apache/spark/tests/unit/apache/spark/operators/test_spark_sql.py
|
{
"start": 1057,
"end": 3371
}
|
class ____:
_config = {
"sql": "SELECT 22",
"conn_id": "spark_special_conn_id",
"total_executor_cores": 4,
"executor_cores": 4,
"executor_memory": "22g",
"keytab": "privileged_user.keytab",
"principal": "user/spark@airflow.org",
"master": "yarn-client",
"name": "special-application-name",
"num_executors": 8,
"verbose": False,
"yarn_queue": "special-queue",
}
def setup_method(self):
args = {"owner": "airflow", "start_date": DEFAULT_DATE}
self.dag = DAG("test_dag_id", schedule=None, default_args=args)
def test_execute(self):
# Given / When
operator = SparkSqlOperator(task_id="spark_sql_job", dag=self.dag, **self._config)
assert self._config["sql"] == operator.sql
assert self._config["conn_id"] == operator._conn_id
assert self._config["total_executor_cores"] == operator._total_executor_cores
assert self._config["executor_cores"] == operator._executor_cores
assert self._config["executor_memory"] == operator._executor_memory
assert self._config["keytab"] == operator._keytab
assert self._config["principal"] == operator._principal
assert self._config["executor_memory"] == operator._executor_memory
assert self._config["keytab"] == operator._keytab
assert self._config["principal"] == operator._principal
assert self._config["master"] == operator._master
assert self._config["name"] == operator._name
assert self._config["num_executors"] == operator._num_executors
assert self._config["verbose"] == operator._verbose
assert self._config["yarn_queue"] == operator._yarn_queue
@pytest.mark.db_test
def test_templating(self, create_task_instance_of_operator, session):
ti = create_task_instance_of_operator(
SparkSqlOperator,
# Templated fields
sql="{{ 'sql' }}",
# Other parameters
dag_id="test_template_body_templating_dag",
task_id="test_template_body_templating_task",
)
session.add(ti)
session.commit()
ti.render_templates()
task: SparkSqlOperator = ti.task
assert task.sql == "sql"
|
TestSparkSqlOperator
|
python
|
langchain-ai__langchain
|
libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_file_search.py
|
{
"start": 4762,
"end": 7327
}
|
class ____:
"""Tests for filesystem-backed glob search."""
def test_glob_basic_pattern(self, tmp_path: Path) -> None:
"""Test basic glob pattern matching."""
(tmp_path / "file1.py").write_text("content", encoding="utf-8")
(tmp_path / "file2.py").write_text("content", encoding="utf-8")
(tmp_path / "file3.txt").write_text("content", encoding="utf-8")
middleware = FilesystemFileSearchMiddleware(root_path=str(tmp_path))
result = middleware.glob_search.func(pattern="*.py")
assert "/file1.py" in result
assert "/file2.py" in result
assert "/file3.txt" not in result
def test_glob_recursive_pattern(self, tmp_path: Path) -> None:
"""Test recursive glob pattern matching."""
(tmp_path / "src").mkdir()
(tmp_path / "src" / "test.py").write_text("content", encoding="utf-8")
(tmp_path / "src" / "nested").mkdir()
(tmp_path / "src" / "nested" / "deep.py").write_text("content", encoding="utf-8")
middleware = FilesystemFileSearchMiddleware(root_path=str(tmp_path))
result = middleware.glob_search.func(pattern="**/*.py")
assert "/src/test.py" in result
assert "/src/nested/deep.py" in result
def test_glob_with_subdirectory_path(self, tmp_path: Path) -> None:
"""Test glob search starting from subdirectory."""
(tmp_path / "src").mkdir()
(tmp_path / "src" / "file1.py").write_text("content", encoding="utf-8")
(tmp_path / "other").mkdir()
(tmp_path / "other" / "file2.py").write_text("content", encoding="utf-8")
middleware = FilesystemFileSearchMiddleware(root_path=str(tmp_path))
result = middleware.glob_search.func(pattern="*.py", path="/src")
assert "/src/file1.py" in result
assert "/other/file2.py" not in result
def test_glob_no_matches(self, tmp_path: Path) -> None:
"""Test glob search with no matches."""
(tmp_path / "file.txt").write_text("content", encoding="utf-8")
middleware = FilesystemFileSearchMiddleware(root_path=str(tmp_path))
result = middleware.glob_search.func(pattern="*.py")
assert result == "No files found"
def test_glob_invalid_path(self, tmp_path: Path) -> None:
"""Test glob search with non-existent path."""
middleware = FilesystemFileSearchMiddleware(root_path=str(tmp_path))
result = middleware.glob_search.func(pattern="*.py", path="/nonexistent")
assert result == "No files found"
|
TestFilesystemGlobSearch
|
python
|
plotly__plotly.py
|
plotly/graph_objs/densitymap/_hoverlabel.py
|
{
"start": 233,
"end": 11262
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "densitymap"
_path_str = "densitymap.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
"showarrow",
}
@property
def align(self):
"""
Sets the horizontal alignment of the text content within the
hover label box. Has an effect only if the hover label text
spans two or more lines.
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `align`.
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `bgcolor`.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.densitymap.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.densitymap.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`namelength`.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
@property
def showarrow(self):
"""
Sets whether or not to show the hover label arrow/triangle
pointing to the data point.
The 'showarrow' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showarrow"]
@showarrow.setter
def showarrow(self, val):
self["showarrow"] = val
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within the hover label box. Has an effect only if the hover
label text spans two or more lines.
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
showarrow=None,
**kwargs,
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.densitymap.Hoverlabel`
align
Sets the horizontal alignment of the text content
within the hover label box. Has an effect only if the hover
label text spans two or more lines.
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
Returns
-------
Hoverlabel
"""
super().__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.densitymap.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.densitymap.Hoverlabel`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("align", arg, align)
self._set_property("alignsrc", arg, alignsrc)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bgcolorsrc", arg, bgcolorsrc)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("bordercolorsrc", arg, bordercolorsrc)
self._set_property("font", arg, font)
self._set_property("namelength", arg, namelength)
self._set_property("namelengthsrc", arg, namelengthsrc)
self._set_property("showarrow", arg, showarrow)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Hoverlabel
|
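Generated property classes like this one are usually populated via plain dicts passed to the trace constructor; a hedged sketch, assuming a plotly version that ships the `densitymap` trace:

```python
import plotly.graph_objects as go

fig = go.Figure(
    go.Densitymap(
        lat=[40.71, 40.72, 40.73],
        lon=[-74.00, -74.01, -74.02],
        z=[1, 2, 3],
        # A plain dict is coerced into the Hoverlabel class above.
        hoverlabel=dict(bgcolor="white", font=dict(size=14), namelength=-1),
    )
)
fig.show()
```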
python
|
django__django
|
django/db/models/functions/text.py
|
{
"start": 8086,
"end": 8869
}
|
class ____(Transform):
function = "REVERSE"
lookup_name = "reverse"
def as_oracle(self, compiler, connection, **extra_context):
# REVERSE in Oracle is undocumented and doesn't support multi-byte
# strings. Use a special subquery instead.
suffix = connection.features.bare_select_suffix
sql, params = super().as_sql(
compiler,
connection,
template=(
"(SELECT LISTAGG(s) WITHIN GROUP (ORDER BY n DESC) FROM "
f"(SELECT LEVEL n, SUBSTR(%(expressions)s, LEVEL, 1) s{suffix} "
"CONNECT BY LEVEL <= LENGTH(%(expressions)s)) "
"GROUP BY %(expressions)s)"
),
**extra_context,
)
return sql, params * 3
|
Reverse
|
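Typical usage is through an annotation; `Author` below is a hypothetical model with a `CharField` named `name`. On Oracle, the LISTAGG subquery above is emitted instead of a bare `REVERSE()`:

```python
from django.db.models.functions import Reverse

# `Author` is a hypothetical model, shown only to illustrate the API.
qs = Author.objects.annotate(backwards=Reverse("name"))
for row in qs.values("name", "backwards"):
    print(row)
```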
python
|
scrapy__scrapy
|
tests/test_scheduler_base.py
|
{
"start": 1937,
"end": 2511
}
|
class ____(InterfaceCheckMixin):
def setup_method(self):
self.scheduler = BaseScheduler()
def test_methods(self):
assert self.scheduler.open(Spider("foo")) is None
assert self.scheduler.close("finished") is None
with pytest.raises(NotImplementedError):
self.scheduler.has_pending_requests()
with pytest.raises(NotImplementedError):
self.scheduler.enqueue_request(Request("https://example.org"))
with pytest.raises(NotImplementedError):
self.scheduler.next_request()
|
TestBaseScheduler
|
python
|
tensorflow__tensorflow
|
tensorflow/python/saved_model/utils_test.py
|
{
"start": 1527,
"end": 8187
}
|
class ____(test.TestCase):
@test_util.run_v1_only(
"b/120545219: `build_tensor_info` is only available in graph mode.")
def testBuildTensorInfoOp(self):
x = constant_op.constant(1, name="x")
y = constant_op.constant(2, name="y")
z = control_flow_ops.group([x, y], name="op_z")
z_op_info = utils.build_tensor_info_from_op(z)
self.assertEqual("op_z", z_op_info.name)
self.assertEqual(types_pb2.DT_INVALID, z_op_info.dtype)
self.assertEqual(0, len(z_op_info.tensor_shape.dim))
@test_util.run_v1_only(
"b/120545219: `build_tensor_info` is only available in graph mode.")
def testBuildTensorInfoDense(self):
x = array_ops.placeholder(dtypes.float32, 1, name="x")
x_tensor_info = utils.build_tensor_info(x)
self.assertEqual("x:0", x_tensor_info.name)
self.assertEqual(types_pb2.DT_FLOAT, x_tensor_info.dtype)
self.assertEqual(1, len(x_tensor_info.tensor_shape.dim))
self.assertEqual(1, x_tensor_info.tensor_shape.dim[0].size)
@test_util.run_v1_only(
"b/120545219: `build_tensor_info` is only available in graph mode.")
def testBuildTensorInfoSparse(self):
x = array_ops.sparse_placeholder(dtypes.float32, [42, 69], name="x")
x_tensor_info = utils.build_tensor_info(x)
self.assertEqual(x.values.name,
x_tensor_info.coo_sparse.values_tensor_name)
self.assertEqual(x.indices.name,
x_tensor_info.coo_sparse.indices_tensor_name)
self.assertEqual(x.dense_shape.name,
x_tensor_info.coo_sparse.dense_shape_tensor_name)
self.assertEqual(types_pb2.DT_FLOAT, x_tensor_info.dtype)
self.assertEqual(2, len(x_tensor_info.tensor_shape.dim))
self.assertEqual(42, x_tensor_info.tensor_shape.dim[0].size)
self.assertEqual(69, x_tensor_info.tensor_shape.dim[1].size)
@test_util.run_v1_only(
"b/120545219: `build_tensor_info` is only available in graph mode.")
def testBuildTensorInfoRagged(self):
x = ragged_factory_ops.constant([[1, 2], [3]])
x_tensor_info = utils.build_tensor_info(x)
# Check components
self.assertEqual(x.values.name,
x_tensor_info.composite_tensor.components[0].name)
self.assertEqual(types_pb2.DT_INT32,
x_tensor_info.composite_tensor.components[0].dtype)
self.assertEqual(x.row_splits.name,
x_tensor_info.composite_tensor.components[1].name)
self.assertEqual(types_pb2.DT_INT64,
x_tensor_info.composite_tensor.components[1].dtype)
# Check type_spec.
spec_proto = struct_pb2.StructuredValue(
type_spec_value=x_tensor_info.composite_tensor.type_spec)
spec = nested_structure_coder.decode_proto(spec_proto)
self.assertEqual(spec, x._type_spec)
def testBuildTensorInfoEager(self):
x = constant_op.constant(1, name="x")
with context.eager_mode(), self.assertRaisesRegex(
RuntimeError, "`build_tensor_info` is not supported"):
utils.build_tensor_info(x)
@test_util.run_v1_only(
"b/120545219: `build_tensor_info` is only available in graph mode.")
def testGetTensorFromInfoDense(self):
expected = array_ops.placeholder(dtypes.float32, 1, name="x")
tensor_info = utils.build_tensor_info(expected)
actual = utils.get_tensor_from_tensor_info(tensor_info)
self.assertIsInstance(actual, tensor.Tensor)
self.assertEqual(expected.name, actual.name)
@test_util.run_v1_only(
"b/120545219: `build_tensor_info` is only available in graph mode.")
def testGetTensorFromInfoSparse(self):
expected = array_ops.sparse_placeholder(dtypes.float32, name="x")
tensor_info = utils.build_tensor_info(expected)
actual = utils.get_tensor_from_tensor_info(tensor_info)
self.assertIsInstance(actual, sparse_tensor.SparseTensor)
self.assertEqual(expected.values.name, actual.values.name)
self.assertEqual(expected.indices.name, actual.indices.name)
self.assertEqual(expected.dense_shape.name, actual.dense_shape.name)
@test_util.run_v1_only(
"b/120545219: `build_tensor_info` is only available in graph mode.")
def testGetTensorFromInfoRagged(self):
expected = ragged_factory_ops.constant([[1, 2], [3]], name="x")
tensor_info = utils.build_tensor_info(expected)
actual = utils.get_tensor_from_tensor_info(tensor_info)
self.assertIsInstance(actual, ragged_tensor.RaggedTensor)
self.assertEqual(expected.values.name, actual.values.name)
self.assertEqual(expected.row_splits.name, actual.row_splits.name)
def testGetTensorFromInfoInOtherGraph(self):
with ops.Graph().as_default() as expected_graph:
expected = array_ops.placeholder(dtypes.float32, 1, name="right")
tensor_info = utils.build_tensor_info(expected)
with ops.Graph().as_default(): # Some other graph.
array_ops.placeholder(dtypes.float32, 1, name="other")
actual = utils.get_tensor_from_tensor_info(tensor_info,
graph=expected_graph)
self.assertIsInstance(actual, tensor.Tensor)
self.assertIs(actual.graph, expected_graph)
self.assertEqual(expected.name, actual.name)
def testGetTensorFromInfoInScope(self):
# Build a TensorInfo with name "bar/x:0".
with ops.Graph().as_default():
with ops.name_scope("bar"):
unscoped = array_ops.placeholder(dtypes.float32, 1, name="x")
tensor_info = utils.build_tensor_info(unscoped)
self.assertEqual("bar/x:0", tensor_info.name)
# Build a graph with node "foo/bar/x:0", akin to importing into scope foo.
with ops.Graph().as_default():
with ops.name_scope("foo"):
with ops.name_scope("bar"):
expected = array_ops.placeholder(dtypes.float32, 1, name="x")
self.assertEqual("foo/bar/x:0", expected.name)
# Test that tensor is found by prepending the import scope.
actual = utils.get_tensor_from_tensor_info(tensor_info,
import_scope="foo")
self.assertEqual(expected.name, actual.name)
@test_util.run_v1_only(
"b/120545219: `build_tensor_info` is only available in graph mode.")
def testGetTensorFromInfoRaisesErrors(self):
expected = array_ops.placeholder(dtypes.float32, 1, name="x")
tensor_info = utils.build_tensor_info(expected)
tensor_info.name = "blah:0" # Nonexistent name.
with self.assertRaises(KeyError):
utils.get_tensor_from_tensor_info(tensor_info)
tensor_info.ClearField("name") # Malformed (missing encoding).
with self.assertRaises(ValueError):
utils.get_tensor_from_tensor_info(tensor_info)
if __name__ == "__main__":
test.main()
|
UtilsTest
|
python
|
huggingface__transformers
|
src/transformers/models/evolla/modeling_evolla.py
|
{
"start": 42812,
"end": 45892
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
@use_kernel_func_from_hub("rotary_pos_emb")
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
|
EvollaMLP
|
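The `rotate_half`/`apply_rotary_pos_emb` pair above implements rotary position embeddings. A self-contained numeric sketch of the same rotation, assuming only PyTorch:

```python
import torch

def rotate_half(x: torch.Tensor) -> torch.Tensor:
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)

batch, heads, seq, head_dim = 1, 2, 4, 8
q = torch.randn(batch, heads, seq, head_dim)
k = torch.randn(batch, heads, seq, head_dim)

# cos/sin have shape [batch, seq, head_dim]; unsqueeze(1) broadcasts
# them over the heads dimension, as the docstring above describes.
inv_freq = 1.0 / (10000 ** (torch.arange(0, head_dim, 2) / head_dim))
angles = torch.arange(seq)[:, None] * inv_freq[None, :]   # [seq, head_dim/2]
emb = torch.cat((angles, angles), dim=-1)                 # [seq, head_dim]
cos, sin = emb.cos()[None], emb.sin()[None]               # [1, seq, head_dim]

q_rot = (q * cos.unsqueeze(1)) + (rotate_half(q) * sin.unsqueeze(1))
k_rot = (k * cos.unsqueeze(1)) + (rotate_half(k) * sin.unsqueeze(1))
print(q_rot.shape, k_rot.shape)  # torch.Size([1, 2, 4, 8]) twice
```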
python
|
huggingface__transformers
|
src/transformers/models/mra/modeling_mra.py
|
{
"start": 8016,
"end": 9088
}
|
class ____(torch.autograd.Function):
@staticmethod
def forward(ctx, sparse_query, indices, dense_key, query_num_block):
sparse_qk_prod = sparse_dense_mm(sparse_query, indices, dense_key, query_num_block)
ctx.save_for_backward(sparse_query, indices, dense_key)
ctx.query_num_block = query_num_block
return sparse_qk_prod
@staticmethod
def backward(ctx, grad):
sparse_query, indices, dense_key = ctx.saved_tensors
query_num_block = ctx.query_num_block
key_num_block = dense_key.size(1) // sparse_query.size(-1)
indices_T = transpose_indices(indices, query_num_block, key_num_block)
grad_key = sparse_dense_mm(sparse_query.transpose(-1, -2), indices_T, grad, key_num_block)
grad_query = mm_to_sparse(grad, dense_key, indices)
return grad_query, None, grad_key, None
@staticmethod
def operator_call(sparse_query, indices, dense_key, query_num_block):
return MraSparseDenseMatMul.apply(sparse_query, indices, dense_key, query_num_block)
|
MraSparseDenseMatMul
|
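The class follows PyTorch's custom `torch.autograd.Function` pattern: `forward` stashes tensors with `save_for_backward`, and `backward` returns one gradient (or `None`) per `forward` input. A minimal self-contained instance of the same pattern:

```python
import torch

class ScaledMatMul(torch.autograd.Function):
    @staticmethod
    def forward(ctx, a, b, scale):
        ctx.save_for_backward(a, b)
        ctx.scale = scale                 # non-tensor state lives on ctx
        return scale * (a @ b)

    @staticmethod
    def backward(ctx, grad_out):
        a, b = ctx.saved_tensors
        # One return value per forward input; `scale` is not differentiable.
        return ctx.scale * grad_out @ b.T, ctx.scale * a.T @ grad_out, None

a = torch.randn(3, 4, requires_grad=True)
b = torch.randn(4, 5, requires_grad=True)
ScaledMatMul.apply(a, b, 2.0).sum().backward()
print(torch.allclose(a.grad, 2.0 * torch.ones(3, 5) @ b.T))  # True
```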
python
|
pytorch__pytorch
|
torch/_inductor/fx_passes/group_batch_fusion.py
|
{
"start": 25307,
"end": 31074
}
|
class ____(BatchFusion):
"""
Batch linear fusion in pre grad pass.
Fuse linear with same size with torch.baddmm
"""
def _getitem_args(self, getitem_node: torch.fx.Node):
if getitem_node.target != operator.__getitem__ or (
getitem_node.op != "call_function"
):
return None
return getitem_node.args[0]
def match(self, node: torch.fx.Node):
if CallFunctionVarArgs(torch.nn.functional.linear).match(
node
) and is_linear_node_can_be_fused(node):
input = get_arg_value(node, 0, "input")
weight = get_arg_value(node, 1, "weight")
bias = get_arg_value(node, 2, "bias")
if self.graph_search_options.get("fuse_nodes_with_same_users", False):
users = [user.target for user in node.users]
else:
users = "" # type: ignore[assignment]
group_key = (
"batch_linear",
self._getitem_args(input),
str(input.meta["example_value"].shape),
str(weight.meta["example_value"].shape),
bias is None,
str(users),
)
else:
group_key = None
return group_key
def fuse(self, graph: torch.fx.GraphModule, subset: list[torch.fx.Node]):
batch_nodes = []
batch_inputs = []
batch_weights = []
batch_biases = []
batch_inputs_metadata = []
batch_weights_metadata = []
batch_biases_metadata = []
for node in subset:
batch_nodes.append(node)
input = get_arg_value(node, 0, "input")
batch_inputs.append(input)
batch_inputs_metadata.append(input.meta["example_value"])
weight = get_arg_value(node, 1, "weight")
batch_weights.append(weight)
batch_weights_metadata.append(weight.meta["example_value"])
bias = get_arg_value(node, 2, "bias")
batch_biases.append(bias)
if bias is not None and hasattr(bias, "meta"):
batch_biases_metadata.append(bias.meta["example_value"])
with graph.inserting_before(subset[0]): # type: ignore[operator]
stack_inputs = graph.call_function( # type: ignore[operator]
torch.stack, args=(batch_inputs,), kwargs={"dim": 0}
)
update_stack_example_value(stack_inputs, batch_inputs_metadata)
stack_weights = graph.call_function( # type: ignore[operator]
torch.stack, args=(batch_weights,), kwargs={"dim": 0}
)
update_stack_example_value(stack_weights, batch_weights_metadata)
transpose_weight = graph.call_function( # type: ignore[operator]
torch.transpose, args=(stack_weights, 1, 2)
)
transpose_weight.meta["example_value"] = torch.transpose(
stack_weights.meta["example_value"], 1, 2
)
if all(bias is None for bias in batch_biases):
bmm = graph.call_function( # type: ignore[operator]
torch.bmm,
args=(stack_inputs, transpose_weight),
)
bmm.meta["example_value"] = torch.bmm(
stack_inputs.meta["example_value"],
transpose_weight.meta["example_value"],
)
bmm_meta = bmm.meta["example_value"]
else:
stack_biases = graph.call_function( # type: ignore[operator]
torch.stack, args=(batch_biases,), kwargs={"dim": 0}
)
update_stack_example_value(stack_biases, batch_biases_metadata)
unsqueeze_biases = graph.call_function( # type: ignore[operator]
torch.unsqueeze, args=(stack_biases, 1)
)
unsqueeze_biases.meta["example_value"] = torch.unsqueeze(
stack_biases.meta["example_value"], 1
)
bmm = graph.call_function( # type: ignore[operator]
torch.baddbmm,
args=(unsqueeze_biases, stack_inputs, transpose_weight),
)
try:
# Broadcasting raises a runtime error when the meta data contains a
# dynamic shape, so skip updating the meta data in that case.
bmm.meta["example_value"] = torch.baddbmm(
unsqueeze_biases.meta["example_value"],
stack_inputs.meta["example_value"],
transpose_weight.meta["example_value"],
)
bmm_meta = bmm.meta["example_value"]
except Exception as e:
                    log.debug(
                        f"exception when updating bmm meta data with stacked inputs: {e}"  # noqa: G004
                    )
bmm_meta = None
bmm = graph.call_function(torch.unbind, args=(bmm,), kwargs={"dim": 0}) # type: ignore[operator]
if bmm_meta is not None:
bmm.meta["example_value"] = torch.unbind(bmm_meta, dim=0)
for i, linear in enumerate(batch_nodes):
with graph.inserting_after(bmm): # type: ignore[operator]
getitem = graph.call_function(operator.getitem, args=(bmm, i)) # type: ignore[operator]
linear.replace_all_uses_with(getitem)
getitem.meta.update(linear.meta)
graph.erase_node(linear) # type: ignore[operator]
counters["inductor"]["batch_linear"] += 1
@register_fusion("batch_layernorm")
|
PreGradBatchLinearFusion
|
python
|
huggingface__transformers
|
src/transformers/models/instructblipvideo/modeling_instructblipvideo.py
|
{
"start": 7804,
"end": 10591
}
|
class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale = self.head_dim**-0.5
self.is_causal = False
self.attention_dropout = config.attention_dropout
# small tweak here compared to CLIP, no bias here
self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=False)
if config.qkv_bias:
q_bias = nn.Parameter(torch.zeros(self.embed_dim))
v_bias = nn.Parameter(torch.zeros(self.embed_dim))
else:
q_bias = None
v_bias = None
if q_bias is not None:
qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
self.qkv.bias = nn.Parameter(qkv_bias)
self.projection = nn.Linear(self.embed_dim, self.embed_dim)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
**kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
bsz, tgt_len, embed_dim = hidden_states.size()
mixed_qkv = self.qkv(hidden_states)
mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads).permute(
2, 0, 3, 1, 4
)
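        # After the permute, mixed_qkv has shape
        # (3, bsz, num_heads, tgt_len, head_dim), so indexing axis 0 below
        # yields the query, key and value projections.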
query_states, key_states, value_states = mixed_qkv[0], mixed_qkv[1], mixed_qkv[2]
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask=None,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scale,
**kwargs,
)
attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
attn_output = self.projection(attn_output)
return attn_output, attn_weights
|
InstructBlipVideoAttention
|
python
|
getsentry__sentry
|
tests/sentry/sentry_metrics/test_snuba.py
|
{
"start": 404,
"end": 552
}
|
class ____(BaseMetricsLayerTestCase, TestCase, GenericMetricsTestMixIn):
def setUp(self) -> None:
super().setUp()
|
MetricsInterfaceTestCase
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/legacy_tf_layers/variable_scope_shim.py
|
{
"start": 4548,
"end": 22590
}
|
class ____(object):
"""TF2-compatible VariableStore that avoids collections & tracks regularizers.
New variable names and new variables can be created; all stored
variables are initialized with the initializer passed to __init__.
  All variables get created in `tf.init_scope` to avoid a bad
interaction between `tf.function` `FuncGraph` internals, Keras
Functional Models, and TPUStrategy variable initialization.
Attributes:
vars: a dictionary with string names (same as passed in GetVar) as keys and
the corresponding TensorFlow Variables as values.
"""
__slots__ = ["_vars", "_regularizers", "_store_eager_variables"]
def __init__(self):
"""Create a variable store."""
self._vars = {} # A dictionary of the stored TensorFlow variables.
self._regularizers = {} # A dict mapping var names to their regularizers.
self._store_eager_variables = True
def get_variable(
self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=vs.VariableSynchronization.AUTO,
aggregation=vs.VariableAggregation.NONE):
"""Gets an existing variable with these parameters or create a new one.
If a variable with the given name is already stored, we return the stored
variable. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
Set `reuse` to None (the default) or tf.compat.v1.AUTO_REUSE when you want
variables to be created if they don't exist or returned if they do.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable.
regularizer: A (Tensor -> Tensor or None) function; the result of applying
it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
      reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation of
        variables. When eager execution is enabled this argument is always
        forced to be AUTO_REUSE.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). `trainable`
defaults to `True`, unless `synchronization` is set to `ON_READ`, in
which case it defaults to `False`.
collections: List of graph collections keys to add the `Variable` to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the `Variable` reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates
instead an experimental ResourceVariable which has well-defined
semantics. Defaults to False (will later change to True). When eager
execution is enabled this argument is always forced to be true.
custom_getter: Callable that takes as a first argument the true getter,
and allows overwriting the internal get_variable method. The signature
of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes: `def
custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed: `def
custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
        ```python
        def custom_getter(getter, name, *args, **kwargs):
          return getter(name + '_suffix', *args, **kwargs)
        ```
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
      synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to
synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
or when violating reuse during variable creation.
RuntimeError: when eager execution is enabled and not called from an
EagerVariableStore.
"""
if custom_getter is not None and not callable(custom_getter):
raise ValueError("Passed a custom_getter which is not callable: %s" %
custom_getter)
with ops.init_scope():
if context.executing_eagerly():
# Variable creation and initialization takes place in `init_scope`s;
# as such, if an `init_scope` lifts us into the eager context, then we
# need to use `ResourceVariable`s.
use_resource = True
# Note that it's fine to reuse eager variables whose initialization was
# lifted from a function-building graph into the eager context (that's why
# the following clause is not wrapped in an `init_scope`); lifted variables
# are tracked by the graph's `VariableStore`.
if context.executing_eagerly():
reuse = vs.AUTO_REUSE
# If a *_ref type is passed in an error would be triggered further down the
# stack. We prevent this using base_dtype to get a non-ref version of the
# type, before doing anything else. When _ref types are removed in favor of
# resources, this line can be removed.
try:
dtype = dtype.base_dtype
except AttributeError:
# .base_dtype not existing means that we will try and use the raw dtype
# which was passed in - this might be a NumPy type which is valid.
pass
# This is the main logic of get_variable. However, custom_getter
# may override this logic. So we save it as a callable and pass
# it to custom_getter.
# Note: the parameters of _true_getter, and their documentation, match
# *exactly* item-for-item with the docstring of this method.
def _true_getter( # pylint: disable=missing-docstring
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None, # pylint: disable=unused-argument
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None, # pylint: disable=unused-argument
constraint=None,
synchronization=vs.VariableSynchronization.AUTO,
aggregation=vs.VariableAggregation.NONE):
# Partitioned variable currently unsupported w/ the shim
if partitioner is not None:
        raise ValueError(
            "`partitioner` arg for `get_variable` is unsupported in TF2. "
            "File a bug if you need help. You passed %s" % partitioner)
# Single variable case
if "%s/part_0" % name in self._vars:
raise ValueError(
"No partitioner was provided, but a partitioned version of the "
"variable was found: %s/part_0. Perhaps a variable of the same "
"name was already created with partitioning?" % name)
return self._get_single_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
caching_device=caching_device,
validate_shape=validate_shape,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
synchronization, aggregation, trainable = (
validate_synchronization_aggregation_trainable(
synchronization, aggregation, trainable, name))
if custom_getter is not None:
# Handle backwards compatibility with getter arguments that were added
# to the API after users started writing custom getters.
custom_getter_kwargs = {
"getter": _true_getter,
"name": name,
"shape": shape,
"dtype": dtype,
"initializer": initializer,
"regularizer": regularizer,
"reuse": reuse,
"trainable": trainable,
"collections": collections,
"caching_device": caching_device,
"partitioner": partitioner,
"validate_shape": validate_shape,
"use_resource": use_resource,
"synchronization": synchronization,
"aggregation": aggregation,
}
# `fn_args` and `has_kwargs` can handle functions, `functools.partial`,
# `lambda`.
if ("constraint" in fn_args(custom_getter) or
_has_kwargs(custom_getter)):
custom_getter_kwargs["constraint"] = constraint
return custom_getter(**custom_getter_kwargs)
else:
return _true_getter(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
def _get_single_variable(
self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
partition_info=None,
reuse=None,
trainable=None,
caching_device=None,
validate_shape=True,
constraint=None,
synchronization=vs.VariableSynchronization.AUTO,
aggregation=vs.VariableAggregation.NONE):
"""Get or create a single Variable (e.g.
a shard or entire variable).
See the documentation of get_variable above (ignore partitioning components)
for details.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
initializer: see get_variable.
regularizer: see get_variable.
partition_info: _PartitionInfo object.
reuse: see get_variable.
trainable: see get_variable.
caching_device: see get_variable.
validate_shape: see get_variable.
constraint: see get_variable.
synchronization: see get_variable.
aggregation: see get_variable.
Returns:
A Variable. See documentation of get_variable above.
Raises:
ValueError: See documentation of get_variable above.
"""
# Set to true if initializer is a constant.
initializing_from_value = False
if initializer is not None and not callable(initializer):
initializing_from_value = True
if shape is not None and initializing_from_value:
raise ValueError("If initializer is a constant, do not specify shape.")
dtype = dtypes.as_dtype(dtype)
shape = as_shape(shape)
if name in self._vars:
# Here we handle the case when returning an existing variable.
if reuse is False: # pylint: disable=g-bool-id-comparison
err_msg = ("Variable %s already exists, disallowed."
" Did you mean to set reuse=True or "
"reuse=tf.AUTO_REUSE in VarScope?" % name)
# ResourceVariables don't have an op associated with so no traceback
raise ValueError(err_msg)
found_var = self._vars[name]
if not shape.is_compatible_with(found_var.get_shape()):
raise ValueError("Trying to share variable %s, but specified shape %s"
" and found shape %s." %
(name, shape, found_var.get_shape()))
if not dtype.is_compatible_with(found_var.dtype):
dtype_str = dtype.name
found_type_str = found_var.dtype.name
raise ValueError("Trying to share variable %s, but specified dtype %s"
" and found dtype %s." %
(name, dtype_str, found_type_str))
return found_var
# The code below handles only the case of creating a new variable.
if reuse is True: # pylint: disable=g-bool-id-comparison
raise ValueError("Variable %s does not exist, or was not created with "
"tf.get_variable(). Did you mean to set "
"reuse=tf.AUTO_REUSE in VarScope?" % name)
# Create the tensor to initialize the variable with default value.
if initializer is None:
initializer, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
# Enter an init scope when creating the initializer.
with ops.init_scope():
if initializing_from_value:
init_val = initializer
variable_dtype = None
else:
# Instantiate initializer if provided initializer is a type object.
if tf_inspect.isclass(initializer):
initializer = initializer()
if shape.is_fully_defined():
if "partition_info" in tf_inspect.getargspec(initializer).args:
init_val = functools.partial(initializer,
shape.as_list(),
dtype=dtype,
partition_info=partition_info)
else:
init_val = functools.partial(initializer,
shape.as_list(), dtype=dtype)
variable_dtype = dtype.base_dtype
else:
init_val = initializer
variable_dtype = None
    # Create the variable (always eagerly, as a workaround for a strange
    # TPU / FuncGraph / Keras functional model interaction).
with ops.init_scope():
v = variables.Variable(
initial_value=init_val,
name=name,
trainable=trainable,
caching_device=caching_device,
dtype=variable_dtype,
validate_shape=validate_shape,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
self._vars[name] = v
logging.vlog(1, "Created variable %s with shape %s and init %s", v.name,
format(shape), initializer)
# Run the regularizer if requested and save the resulting loss.
if regularizer:
self.add_regularizer(v, regularizer)
return v
def add_regularizer(self, var, regularizer):
self._regularizers[var.name] = functools.partial(regularizer, var)
# Initialize variable when no initializer provided
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
"""Provide a default initializer and a corresponding value.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
Returns:
initializer and initializing_from_value. See get_variable above.
Raises:
ValueError: When giving unsupported dtype.
"""
del shape
# If dtype is DT_FLOAT, provide a uniform unit scaling initializer
if dtype.is_floating:
initializer = init_ops.glorot_uniform_initializer()
initializing_from_value = False
# If dtype is DT_INT/DT_UINT, provide a default value `zero`
# If dtype is DT_BOOL, provide a default value `FALSE`
elif (dtype.is_integer or dtype.is_unsigned or dtype.is_bool or
dtype == dtypes.string):
initializer = init_ops.zeros_initializer()
initializing_from_value = False
    # NOTE: Do we need support for handling DT_STRING and DT_COMPLEX here?
else:
raise ValueError("An initializer for variable %s of %s is required" %
(name, dtype.base_dtype))
return initializer, initializing_from_value
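# A hedged usage sketch (names and shapes are illustrative assumptions):
#   store = _EagerVariableStore()
#   w = store.get_variable("dense/kernel", shape=[4, 2])   # created
#   w2 = store.get_variable("dense/kernel", shape=[4, 2])  # reused
#   assert w is w2  # under eager execution, reuse is forced to AUTO_REUSE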
|
_EagerVariableStore
|
python
|
Lightning-AI__lightning
|
examples/pytorch/domain_templates/computer_vision_fine_tuning.py
|
{
"start": 3851,
"end": 6046
}
|
class ____(LightningDataModule):
def __init__(self, dl_path: Union[str, Path] = "data", num_workers: int = 0, batch_size: int = 8):
"""CatDogImageDataModule.
Args:
dl_path: root directory where to download the data
num_workers: number of CPU workers
            batch_size: number of samples in a batch
"""
super().__init__()
self._dl_path = dl_path
self._num_workers = num_workers
self._batch_size = batch_size
def prepare_data(self):
"""Download images and prepare images datasets."""
download_and_extract_archive(url=DATA_URL, download_root=self._dl_path, remove_finished=True)
@property
def data_path(self):
return Path(self._dl_path).joinpath("cats_and_dogs_filtered")
@property
def normalize_transform(self):
return transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
@property
def train_transform(self):
return transforms.Compose([
transforms.Resize((224, 224)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
self.normalize_transform,
])
@property
def valid_transform(self):
return transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor(), self.normalize_transform])
def create_dataset(self, root, transform):
return ImageFolder(root=root, transform=transform)
def __dataloader(self, train: bool):
"""Train/validation loaders."""
if train:
dataset = self.create_dataset(self.data_path.joinpath("train"), self.train_transform)
else:
dataset = self.create_dataset(self.data_path.joinpath("validation"), self.valid_transform)
return DataLoader(dataset=dataset, batch_size=self._batch_size, num_workers=self._num_workers, shuffle=train)
def train_dataloader(self):
log.info("Training data loaded.")
return self.__dataloader(train=True)
def val_dataloader(self):
log.info("Validation data loaded.")
return self.__dataloader(train=False)
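# A hedged usage sketch (argument values are illustrative):
#   dm = CatDogImageDataModule(dl_path="data", num_workers=2, batch_size=8)
#   dm.prepare_data()                     # downloads and extracts DATA_URL
#   train_loader = dm.train_dataloader()  # shuffled, with augmentation
#   val_loader = dm.val_dataloader()      # deterministic eval transforms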
# --- PyTorch Lightning module ---
|
CatDogImageDataModule
|
python
|
huggingface__transformers
|
tests/models/yolos/test_modeling_yolos.py
|
{
"start": 1331,
"end": 6320
}
|
class ____:
def __init__(
self,
parent,
batch_size=13,
image_size=[30, 30],
patch_size=2,
num_channels=3,
is_training=True,
use_labels=True,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
type_sequence_label_size=10,
initializer_range=0.02,
num_labels=3,
scope=None,
n_targets=8,
num_detection_tokens=10,
attn_implementation="eager",
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.scope = scope
self.n_targets = n_targets
self.num_detection_tokens = num_detection_tokens
self.attn_implementation = attn_implementation
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
labels = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
labels = []
for i in range(self.batch_size):
target = {}
target["class_labels"] = torch.randint(
high=self.num_labels, size=(self.n_targets,), device=torch_device
)
target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
labels.append(target)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return YolosConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
is_decoder=False,
initializer_range=self.initializer_range,
num_detection_tokens=self.num_detection_tokens,
num_labels=self.num_labels,
attn_implementation=self.attn_implementation,
)
def create_and_check_model(self, config, pixel_values, labels):
model = YolosModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
)
def create_and_check_for_object_detection(self, config, pixel_values, labels):
model = YolosForObjectDetection(config)
model.to(torch_device)
model.eval()
result = model(pixel_values=pixel_values)
result = model(pixel_values)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
result = model(pixel_values=pixel_values, labels=labels)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
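# Worked example of expected_seq_len with the defaults above: image_size=[30, 30]
# and patch_size=2 give (30 // 2) * (30 // 2) = 225 patches, so
# expected_seq_len = 225 + 1 ([CLS] token) + 10 detection tokens = 236.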
@require_torch
|
YolosModelTester
|
python
|
huggingface__transformers
|
src/transformers/models/big_bird/modeling_big_bird.py
|
{
"start": 61871,
"end": 65623
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.attention_type = config.attention_type
self.layer = nn.ModuleList(
[BigBirdLayer(config, seed=layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.gradient_checkpointing = False
def set_attention_type(self, value: str):
if value not in ["original_full", "block_sparse"]:
raise ValueError(
f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
)
# attention type is already correctly set
if value == self.attention_type:
return
self.attention_type = value
for i, layer in enumerate(self.layer):
layer.set_attention_type(value, layer_idx=i)
def forward(
self,
hidden_states,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
band_mask=None,
from_mask=None,
to_mask=None,
blocked_encoder_mask=None,
return_dict=True,
cache_position=None,
) -> Union[BaseModelOutputWithPastAndCrossAttentions, tuple]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
band_mask,
from_mask,
to_mask,
blocked_encoder_mask,
past_key_values,
output_attentions,
cache_position,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
past_key_values,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->BigBird
|
BigBirdEncoder
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/prompts/base.py
|
{
"start": 11323,
"end": 13943
}
|
class ____(BasePromptTemplate): # type: ignore[no-redef]
default_template: SerializeAsAny[BasePromptTemplate]
conditionals: Optional[
Sequence[Tuple[Callable[[BaseLLM], bool], BasePromptTemplate]]
] = None
def __init__(
self,
default_template: BasePromptTemplate,
conditionals: Optional[
Sequence[Tuple[Callable[[BaseLLM], bool], BasePromptTemplate]]
] = None,
):
metadata = default_template.metadata
kwargs = default_template.kwargs
template_vars = default_template.template_vars
output_parser = default_template.output_parser
super().__init__(
default_template=default_template,
conditionals=conditionals,
metadata=metadata,
kwargs=kwargs,
template_vars=template_vars,
output_parser=output_parser,
)
def select(self, llm: Optional[BaseLLM] = None) -> BasePromptTemplate:
# ensure output parser is up to date
self.default_template.output_parser = self.output_parser
if llm is None:
return self.default_template
if self.conditionals is not None:
for condition, prompt in self.conditionals:
if condition(llm):
# ensure output parser is up to date
prompt.output_parser = self.output_parser
return prompt
return self.default_template
def partial_format(self, **kwargs: Any) -> "SelectorPromptTemplate":
default_template = self.default_template.partial_format(**kwargs)
if self.conditionals is None:
conditionals = None
else:
conditionals = [
(condition, prompt.partial_format(**kwargs))
for condition, prompt in self.conditionals
]
return SelectorPromptTemplate(
default_template=default_template, conditionals=conditionals
)
def format(self, llm: Optional[BaseLLM] = None, **kwargs: Any) -> str:
"""Format the prompt into a string."""
prompt = self.select(llm=llm)
return prompt.format(**kwargs)
def format_messages(
self, llm: Optional[BaseLLM] = None, **kwargs: Any
) -> List[ChatMessage]:
"""Format the prompt into a list of chat messages."""
prompt = self.select(llm=llm)
return prompt.format_messages(**kwargs)
def get_template(self, llm: Optional[BaseLLM] = None) -> str:
prompt = self.select(llm=llm)
return prompt.get_template(llm=llm)
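# A hedged usage sketch (the predicate and templates are illustrative):
#   selector = SelectorPromptTemplate(
#       default_template=text_qa_template,
#       conditionals=[(lambda llm: llm.metadata.is_chat_model, chat_qa_template)],
#   )
#   prompt = selector.select(llm=my_llm)  # chat template iff the predicate holds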
|
SelectorPromptTemplate
|
python
|
langchain-ai__langchain
|
libs/text-splitters/langchain_text_splitters/base.py
|
{
"start": 873,
"end": 8870
}
|
class ____(BaseDocumentTransformer, ABC):
"""Interface for splitting text into chunks."""
def __init__(
self,
chunk_size: int = 4000,
chunk_overlap: int = 200,
length_function: Callable[[str], int] = len,
keep_separator: bool | Literal["start", "end"] = False, # noqa: FBT001,FBT002
add_start_index: bool = False, # noqa: FBT001,FBT002
strip_whitespace: bool = True, # noqa: FBT001,FBT002
) -> None:
"""Create a new TextSplitter.
Args:
chunk_size: Maximum size of chunks to return
chunk_overlap: Overlap in characters between chunks
length_function: Function that measures the length of given chunks
keep_separator: Whether to keep the separator and where to place it
in each corresponding chunk `(True='start')`
add_start_index: If `True`, includes chunk's start index in metadata
strip_whitespace: If `True`, strips whitespace from the start and end of
every document
"""
if chunk_size <= 0:
msg = f"chunk_size must be > 0, got {chunk_size}"
raise ValueError(msg)
if chunk_overlap < 0:
msg = f"chunk_overlap must be >= 0, got {chunk_overlap}"
raise ValueError(msg)
if chunk_overlap > chunk_size:
msg = (
f"Got a larger chunk overlap ({chunk_overlap}) than chunk size "
f"({chunk_size}), should be smaller."
)
raise ValueError(msg)
self._chunk_size = chunk_size
self._chunk_overlap = chunk_overlap
self._length_function = length_function
self._keep_separator = keep_separator
self._add_start_index = add_start_index
self._strip_whitespace = strip_whitespace
@abstractmethod
def split_text(self, text: str) -> list[str]:
"""Split text into multiple components."""
def create_documents(
self, texts: list[str], metadatas: list[dict[Any, Any]] | None = None
) -> list[Document]:
"""Create a list of `Document` objects from a list of texts."""
metadatas_ = metadatas or [{}] * len(texts)
documents = []
for i, text in enumerate(texts):
index = 0
previous_chunk_len = 0
for chunk in self.split_text(text):
metadata = copy.deepcopy(metadatas_[i])
if self._add_start_index:
offset = index + previous_chunk_len - self._chunk_overlap
index = text.find(chunk, max(0, offset))
metadata["start_index"] = index
previous_chunk_len = len(chunk)
new_doc = Document(page_content=chunk, metadata=metadata)
documents.append(new_doc)
return documents
def split_documents(self, documents: Iterable[Document]) -> list[Document]:
"""Split documents."""
texts, metadatas = [], []
for doc in documents:
texts.append(doc.page_content)
metadatas.append(doc.metadata)
return self.create_documents(texts, metadatas=metadatas)
def _join_docs(self, docs: list[str], separator: str) -> str | None:
text = separator.join(docs)
if self._strip_whitespace:
text = text.strip()
return text or None
def _merge_splits(self, splits: Iterable[str], separator: str) -> list[str]:
# We now want to combine these smaller pieces into medium size
# chunks to send to the LLM.
separator_len = self._length_function(separator)
docs = []
current_doc: list[str] = []
total = 0
for d in splits:
len_ = self._length_function(d)
if (
total + len_ + (separator_len if len(current_doc) > 0 else 0)
> self._chunk_size
):
if total > self._chunk_size:
logger.warning(
"Created a chunk of size %d, which is longer than the "
"specified %d",
total,
self._chunk_size,
)
if len(current_doc) > 0:
doc = self._join_docs(current_doc, separator)
if doc is not None:
docs.append(doc)
# Keep on popping if:
# - we have a larger chunk than in the chunk overlap
# - or if we still have any chunks and the length is long
while total > self._chunk_overlap or (
total + len_ + (separator_len if len(current_doc) > 0 else 0)
> self._chunk_size
and total > 0
):
total -= self._length_function(current_doc[0]) + (
separator_len if len(current_doc) > 1 else 0
)
current_doc = current_doc[1:]
current_doc.append(d)
total += len_ + (separator_len if len(current_doc) > 1 else 0)
doc = self._join_docs(current_doc, separator)
if doc is not None:
docs.append(doc)
return docs
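    # Worked trace of _merge_splits (illustrative values): with chunk_size=10,
    # chunk_overlap=5, separator=" " and splits ["aaaa", "bbbb", "cccc"], the
    # first chunk fills to "aaaa bbbb" (length 9); adding "cccc" would exceed
    # 10, so "aaaa" is popped to satisfy the overlap budget and the second
    # chunk becomes "bbbb cccc", giving ["aaaa bbbb", "bbbb cccc"].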
@classmethod
def from_huggingface_tokenizer(
cls, tokenizer: PreTrainedTokenizerBase, **kwargs: Any
) -> TextSplitter:
"""Text splitter that uses Hugging Face tokenizer to count length."""
if not _HAS_TRANSFORMERS:
msg = (
"Could not import transformers python package. "
"Please install it with `pip install transformers`."
)
raise ValueError(msg)
if not isinstance(tokenizer, PreTrainedTokenizerBase):
msg = "Tokenizer received was not an instance of PreTrainedTokenizerBase" # type: ignore[unreachable]
raise ValueError(msg) # noqa: TRY004
def _huggingface_tokenizer_length(text: str) -> int:
return len(tokenizer.tokenize(text))
return cls(length_function=_huggingface_tokenizer_length, **kwargs)
@classmethod
def from_tiktoken_encoder(
cls,
encoding_name: str = "gpt2",
model_name: str | None = None,
allowed_special: Literal["all"] | AbstractSet[str] = set(),
disallowed_special: Literal["all"] | Collection[str] = "all",
**kwargs: Any,
) -> Self:
"""Text splitter that uses `tiktoken` encoder to count length."""
if not _HAS_TIKTOKEN:
msg = (
"Could not import tiktoken python package. "
"This is needed in order to calculate max_tokens_for_prompt. "
"Please install it with `pip install tiktoken`."
)
raise ImportError(msg)
if model_name is not None:
enc = tiktoken.encoding_for_model(model_name)
else:
enc = tiktoken.get_encoding(encoding_name)
def _tiktoken_encoder(text: str) -> int:
return len(
enc.encode(
text,
allowed_special=allowed_special,
disallowed_special=disallowed_special,
)
)
if issubclass(cls, TokenTextSplitter):
extra_kwargs = {
"encoding_name": encoding_name,
"model_name": model_name,
"allowed_special": allowed_special,
"disallowed_special": disallowed_special,
}
kwargs = {**kwargs, **extra_kwargs}
return cls(length_function=_tiktoken_encoder, **kwargs)
@override
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Transform sequence of documents by splitting them."""
return self.split_documents(list(documents))
|
TextSplitter
|
python
|
keras-team__keras
|
keras/src/metrics/regression_metrics_test.py
|
{
"start": 3332,
"end": 4741
}
|
class ____(testing.TestCase):
def test_config(self):
mae_obj = metrics.MeanAbsoluteError(name="my_mae", dtype="int32")
self.assertEqual(mae_obj.name, "my_mae")
self.assertEqual(mae_obj._dtype, "int32")
# Check save and restore config
mae_obj2 = metrics.MeanAbsoluteError.from_config(mae_obj.get_config())
self.assertEqual(mae_obj2.name, "my_mae")
self.assertEqual(mae_obj2._dtype, "int32")
def test_unweighted(self):
mae_obj = metrics.MeanAbsoluteError()
y_true = np.array(
[[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
)
y_pred = np.array(
[[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
)
mae_obj.update_state(y_true, y_pred)
result = mae_obj.result()
self.assertAllClose(0.5, result, atol=1e-5)
def test_weighted(self):
mae_obj = metrics.MeanAbsoluteError()
y_true = np.array(
[[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
)
y_pred = np.array(
[[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
)
sample_weight = np.array([1.0, 1.5, 2.0, 2.5])
result = mae_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.54285, result, atol=1e-5)
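# Worked check of the expectations above: the per-row mean absolute errors are
# 0.4, 0.4, 0.4 and 0.8, so the unweighted result is 10 / 20 = 0.5 and the
# weighted result is (0.4*1.0 + 0.4*1.5 + 0.4*2.0 + 0.8*2.5) / 7.0 ≈ 0.54286.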
|
MeanAbsoluteErrorTest
|
python
|
plotly__plotly.py
|
plotly/graph_objs/layout/xaxis/_rangeselector.py
|
{
"start": 235,
"end": 13093
}
|
class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.xaxis"
_path_str = "layout.xaxis.rangeselector"
_valid_props = {
"activecolor",
"bgcolor",
"bordercolor",
"borderwidth",
"buttondefaults",
"buttons",
"font",
"visible",
"x",
"xanchor",
"y",
"yanchor",
}
@property
def activecolor(self):
"""
Sets the background color of the active range selector button.
The 'activecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["activecolor"]
@activecolor.setter
def activecolor(self, val):
self["activecolor"] = val
@property
def bgcolor(self):
"""
Sets the background color of the range selector buttons.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bordercolor(self):
"""
Sets the color of the border enclosing the range selector.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def borderwidth(self):
"""
Sets the width (in px) of the border enclosing the range
selector.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
@property
def buttons(self):
"""
        Sets the specifications for each button. By default, a range
        selector comes with no buttons.
The 'buttons' property is a tuple of instances of
Button that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.xaxis.rangeselector.Button
- A list or tuple of dicts of string/value properties that
will be passed to the Button constructor
Returns
-------
tuple[plotly.graph_objs.layout.xaxis.rangeselector.Button]
"""
return self["buttons"]
@buttons.setter
def buttons(self, val):
self["buttons"] = val
@property
def buttondefaults(self):
"""
When used in a template (as
layout.template.layout.xaxis.rangeselector.buttondefaults),
sets the default property values to use for elements of
layout.xaxis.rangeselector.buttons
The 'buttondefaults' property is an instance of Button
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.xaxis.rangeselector.Button`
- A dict of string/value properties that will be passed
to the Button constructor
Returns
-------
plotly.graph_objs.layout.xaxis.rangeselector.Button
"""
return self["buttondefaults"]
@buttondefaults.setter
def buttondefaults(self, val):
self["buttondefaults"] = val
@property
def font(self):
"""
Sets the font of the range selector button text.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.xaxis.rangeselector.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.layout.xaxis.rangeselector.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def visible(self):
"""
Determines whether or not this range selector is visible. Note
that range selectors are only available for x axes of `type`
set to or auto-typed to "date".
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def x(self):
"""
Sets the x position (in normalized coordinates) of the range
selector.
The 'x' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def xanchor(self):
"""
Sets the range selector's horizontal position anchor. This
anchor binds the `x` position to the "left", "center" or
"right" of the range selector.
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
@property
def y(self):
"""
Sets the y position (in normalized coordinates) of the range
selector.
The 'y' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def yanchor(self):
"""
        Sets the range selector's vertical position anchor. This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the range selector.
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
@property
def _prop_descriptions(self):
return """\
activecolor
Sets the background color of the active range selector
button.
bgcolor
Sets the background color of the range selector
buttons.
bordercolor
Sets the color of the border enclosing the range
selector.
borderwidth
Sets the width (in px) of the border enclosing the
range selector.
buttons
            Sets the specifications for each button. By default, a
range selector comes with no buttons.
buttondefaults
When used in a template (as layout.template.layout.xaxi
s.rangeselector.buttondefaults), sets the default
property values to use for elements of
layout.xaxis.rangeselector.buttons
font
Sets the font of the range selector button text.
visible
Determines whether or not this range selector is
visible. Note that range selectors are only available
for x axes of `type` set to or auto-typed to "date".
x
Sets the x position (in normalized coordinates) of the
range selector.
xanchor
Sets the range selector's horizontal position anchor.
This anchor binds the `x` position to the "left",
"center" or "right" of the range selector.
y
Sets the y position (in normalized coordinates) of the
range selector.
yanchor
            Sets the range selector's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the range selector.
"""
def __init__(
self,
arg=None,
activecolor=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
buttons=None,
buttondefaults=None,
font=None,
visible=None,
x=None,
xanchor=None,
y=None,
yanchor=None,
**kwargs,
):
"""
Construct a new Rangeselector object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.xaxis.Rangeselector`
activecolor
Sets the background color of the active range selector
button.
bgcolor
Sets the background color of the range selector
buttons.
bordercolor
Sets the color of the border enclosing the range
selector.
borderwidth
Sets the width (in px) of the border enclosing the
range selector.
buttons
            Sets the specifications for each button. By default, a
range selector comes with no buttons.
buttondefaults
When used in a template (as layout.template.layout.xaxi
s.rangeselector.buttondefaults), sets the default
property values to use for elements of
layout.xaxis.rangeselector.buttons
font
Sets the font of the range selector button text.
visible
Determines whether or not this range selector is
visible. Note that range selectors are only available
for x axes of `type` set to or auto-typed to "date".
x
Sets the x position (in normalized coordinates) of the
range selector.
xanchor
Sets the range selector's horizontal position anchor.
This anchor binds the `x` position to the "left",
"center" or "right" of the range selector.
y
Sets the y position (in normalized coordinates) of the
range selector.
yanchor
            Sets the range selector's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the range selector.
Returns
-------
Rangeselector
"""
super().__init__("rangeselector")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.xaxis.Rangeselector
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.xaxis.Rangeselector`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("activecolor", arg, activecolor)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("borderwidth", arg, borderwidth)
self._set_property("buttons", arg, buttons)
self._set_property("buttondefaults", arg, buttondefaults)
self._set_property("font", arg, font)
self._set_property("visible", arg, visible)
self._set_property("x", arg, x)
self._set_property("xanchor", arg, xanchor)
self._set_property("y", arg, y)
self._set_property("yanchor", arg, yanchor)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
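# A hedged usage sketch (data and button values are illustrative):
#   import plotly.graph_objects as go
#   fig = go.Figure(go.Scatter(x=dates, y=values))  # dates: datetime x-axis
#   fig.update_xaxes(rangeselector=go.layout.xaxis.Rangeselector(
#       buttons=[dict(count=1, label="1m", step="month", stepmode="backward"),
#                dict(step="all")]))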
|
Rangeselector
|
python
|
openai__openai-python
|
src/openai/resources/vector_stores/files.py
|
{
"start": 38377,
"end": 39018
}
|
class ____:
def __init__(self, files: Files) -> None:
self._files = files
self.create = to_streamed_response_wrapper(
files.create,
)
self.retrieve = to_streamed_response_wrapper(
files.retrieve,
)
self.update = to_streamed_response_wrapper(
files.update,
)
self.list = to_streamed_response_wrapper(
files.list,
)
self.delete = to_streamed_response_wrapper(
files.delete,
)
self.content = to_streamed_response_wrapper(
files.content,
)
|
FilesWithStreamingResponse
|
python
|
apache__airflow
|
providers/microsoft/azure/src/airflow/providers/microsoft/azure/triggers/message_bus.py
|
{
"start": 1431,
"end": 3496
}
|
class ____(BaseEventTrigger):
"""
Base trigger for Azure Service Bus message processing.
This trigger provides common functionality for listening to Azure Service Bus
queues and topics/subscriptions. It handles connection management and
async message processing.
:param poll_interval: Time interval between polling operations (seconds)
:param azure_service_bus_conn_id: Connection ID for Azure Service Bus
:param max_wait_time: Maximum time to wait for messages (seconds)
"""
default_conn_name = "azure_service_bus_default"
default_max_wait_time = None
default_poll_interval = 60
def __init__(
self,
poll_interval: float | None = None,
azure_service_bus_conn_id: str | None = None,
max_wait_time: float | None = None,
) -> None:
self.connection_id = (
azure_service_bus_conn_id
if azure_service_bus_conn_id
else BaseAzureServiceBusTrigger.default_conn_name
)
self.max_wait_time = (
max_wait_time if max_wait_time else BaseAzureServiceBusTrigger.default_max_wait_time
)
self.poll_interval = (
poll_interval if poll_interval else BaseAzureServiceBusTrigger.default_poll_interval
)
self.message_hook = MessageHook(azure_service_bus_conn_id=self.connection_id)
@abstractmethod
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serialize the trigger instance."""
@abstractmethod
def run(self) -> AsyncIterator[TriggerEvent]:
"""Run the trigger logic."""
@classmethod
def _get_message_body(cls, message: ServiceBusReceivedMessage) -> str:
message_body = message.body
if isinstance(message_body, bytes):
return message_body.decode("utf-8")
try:
return "".join(chunk.decode("utf-8") for chunk in message_body)
except Exception:
raise TypeError(f"Expected bytes or an iterator of bytes, but got {type(message_body).__name__}")
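# Hedged examples of _get_message_body (message bodies are illustrative):
#   bytes body           b"hello"         -> "hello"
#   iterable of bytes    [b"he", b"llo"]  -> "hello"
#   anything else raises TypeError.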
|
BaseAzureServiceBusTrigger
|
python
|
viewflow__viewflow
|
viewflow/forms/__init__.py
|
{
"start": 485,
"end": 753
}
|
class ____:
pass
__all__ = (
"Caption",
"Column",
"Layout",
"LayoutNode",
"Row",
"Span",
"FieldSet",
"FormLayout",
"FormSet",
"ModelForm",
"FormAjaxCompleteMixin",
"FormDependentSelectMixin",
)
|
FormDependentSelectMixin
|
python
|
kamyu104__LeetCode-Solutions
|
Python/minimum-number-of-changes-to-make-binary-string-beautiful.py
|
{
"start": 38,
"end": 221
}
|
class ____(object):
def minChanges(self, s):
"""
:type s: str
:rtype: int
"""
return sum(s[i] != s[i+1] for i in xrange(0, len(s), 2))
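# Worked example: s = "1001" pairs into "10" and "01"; both pairs mismatch,
# so minChanges("1001") == 2 (one change per pair makes it uniform).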
|
Solution
|
python
|
geekcomputers__Python
|
venv/Lib/site-packages/pip/_internal/vcs/git.py
|
{
"start": 1495,
"end": 18177
}
|
class ____(VersionControl):
name = "git"
dirname = ".git"
repo_name = "clone"
schemes = (
"git+http",
"git+https",
"git+ssh",
"git+git",
"git+file",
)
# Prevent the user's environment variables from interfering with pip:
# https://github.com/pypa/pip/issues/1130
unset_environ = ("GIT_DIR", "GIT_WORK_TREE")
default_arg_rev = "HEAD"
@staticmethod
def get_base_rev_args(rev: str) -> List[str]:
return [rev]
def is_immutable_rev_checkout(self, url: str, dest: str) -> bool:
_, rev_options = self.get_url_rev_options(hide_url(url))
if not rev_options.rev:
return False
if not self.is_commit_id_equal(dest, rev_options.rev):
# the current commit is different from rev,
# which means rev was something else than a commit hash
return False
# return False in the rare case rev is both a commit hash
# and a tag or a branch; we don't want to cache in that case
# because that branch/tag could point to something else in the future
is_tag_or_branch = bool(self.get_revision_sha(dest, rev_options.rev)[0])
return not is_tag_or_branch
def get_git_version(self) -> Tuple[int, ...]:
version = self.run_command(
["version"],
command_desc="git version",
show_stdout=False,
stdout_only=True,
)
match = GIT_VERSION_REGEX.match(version)
if not match:
logger.warning("Can't parse git version: %s", version)
return ()
return (int(match.group(1)), int(match.group(2)))
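    # Example: "git version 2.17.1" parses to (2, 17); unparsable output logs
    # a warning and returns (), which compares as less than any version tuple.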
@classmethod
def get_current_branch(cls, location: str) -> Optional[str]:
"""
Return the current branch, or None if HEAD isn't at a branch
(e.g. detached HEAD).
"""
# git-symbolic-ref exits with empty stdout if "HEAD" is a detached
# HEAD rather than a symbolic ref. In addition, the -q causes the
# command to exit with status code 1 instead of 128 in this case
# and to suppress the message to stderr.
args = ["symbolic-ref", "-q", "HEAD"]
output = cls.run_command(
args,
extra_ok_returncodes=(1,),
show_stdout=False,
stdout_only=True,
cwd=location,
)
ref = output.strip()
if ref.startswith("refs/heads/"):
return ref[len("refs/heads/") :]
return None
@classmethod
def get_revision_sha(cls, dest: str, rev: str) -> Tuple[Optional[str], bool]:
"""
Return (sha_or_none, is_branch), where sha_or_none is a commit hash
if the revision names a remote branch or tag, otherwise None.
Args:
dest: the repository directory.
rev: the revision name.
"""
# Pass rev to pre-filter the list.
output = cls.run_command(
["show-ref", rev],
cwd=dest,
show_stdout=False,
stdout_only=True,
on_returncode="ignore",
)
refs = {}
# NOTE: We do not use splitlines here since that would split on other
# unicode separators, which can be maliciously used to install a
# different revision.
for line in output.strip().split("\n"):
line = line.rstrip("\r")
if not line:
continue
try:
ref_sha, ref_name = line.split(" ", maxsplit=2)
except ValueError:
# Include the offending line to simplify troubleshooting if
# this error ever occurs.
raise ValueError(f"unexpected show-ref line: {line!r}")
refs[ref_name] = ref_sha
branch_ref = f"refs/remotes/origin/{rev}"
tag_ref = f"refs/tags/{rev}"
sha = refs.get(branch_ref)
if sha is not None:
return (sha, True)
sha = refs.get(tag_ref)
return (sha, False)
@classmethod
def _should_fetch(cls, dest: str, rev: str) -> bool:
"""
Return true if rev is a ref or is a commit that we don't have locally.
Branches and tags are not considered in this method because they are
assumed to be always available locally (which is a normal outcome of
``git clone`` and ``git fetch --tags``).
"""
if rev.startswith("refs/"):
# Always fetch remote refs.
return True
if not looks_like_hash(rev):
# Git fetch would fail with abbreviated commits.
return False
if cls.has_commit(dest, rev):
# Don't fetch if we have the commit locally.
return False
return True
@classmethod
def resolve_revision(
cls, dest: str, url: HiddenText, rev_options: RevOptions
) -> RevOptions:
"""
Resolve a revision to a new RevOptions object with the SHA1 of the
branch, tag, or ref if found.
Args:
rev_options: a RevOptions object.
"""
rev = rev_options.arg_rev
# The arg_rev property's implementation for Git ensures that the
# rev return value is always non-None.
assert rev is not None
sha, is_branch = cls.get_revision_sha(dest, rev)
if sha is not None:
rev_options = rev_options.make_new(sha)
rev_options = replace(rev_options, branch_name=(rev if is_branch else None))
return rev_options
# Do not show a warning for the common case of something that has
# the form of a Git commit hash.
if not looks_like_hash(rev):
logger.warning(
"Did not find branch or tag '%s', assuming revision or ref.",
rev,
)
if not cls._should_fetch(dest, rev):
return rev_options
# fetch the requested revision
cls.run_command(
make_command("fetch", "-q", url, rev_options.to_args()),
cwd=dest,
)
# Change the revision to the SHA of the ref we fetched
sha = cls.get_revision(dest, rev="FETCH_HEAD")
rev_options = rev_options.make_new(sha)
return rev_options
@classmethod
def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool:
"""
Return whether the current commit hash equals the given name.
Args:
dest: the repository directory.
name: a string name.
"""
if not name:
# Then avoid an unnecessary subprocess call.
return False
return cls.get_revision(dest) == name
def fetch_new(
self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int
) -> None:
rev_display = rev_options.to_display()
logger.info("Cloning %s%s to %s", url, rev_display, display_path(dest))
if verbosity <= 0:
flags: Tuple[str, ...] = ("--quiet",)
elif verbosity == 1:
flags = ()
else:
flags = ("--verbose", "--progress")
if self.get_git_version() >= (2, 17):
# Git added support for partial clone in 2.17
# https://git-scm.com/docs/partial-clone
            # Speeds up cloning by functioning without a complete copy of the repository
self.run_command(
make_command(
"clone",
"--filter=blob:none",
*flags,
url,
dest,
)
)
else:
self.run_command(make_command("clone", *flags, url, dest))
if rev_options.rev:
# Then a specific revision was requested.
rev_options = self.resolve_revision(dest, url, rev_options)
branch_name = getattr(rev_options, "branch_name", None)
logger.debug("Rev options %s, branch_name %s", rev_options, branch_name)
if branch_name is None:
# Only do a checkout if the current commit id doesn't match
# the requested revision.
if not self.is_commit_id_equal(dest, rev_options.rev):
cmd_args = make_command(
"checkout",
"-q",
rev_options.to_args(),
)
self.run_command(cmd_args, cwd=dest)
elif self.get_current_branch(dest) != branch_name:
# Then a specific branch was requested, and that branch
# is not yet checked out.
track_branch = f"origin/{branch_name}"
cmd_args = [
"checkout",
"-b",
branch_name,
"--track",
track_branch,
]
self.run_command(cmd_args, cwd=dest)
else:
sha = self.get_revision(dest)
rev_options = rev_options.make_new(sha)
logger.info("Resolved %s to commit %s", url, rev_options.rev)
#: repo may contain submodules
self.update_submodules(dest)
def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
self.run_command(
make_command("config", "remote.origin.url", url),
cwd=dest,
)
cmd_args = make_command("checkout", "-q", rev_options.to_args())
self.run_command(cmd_args, cwd=dest)
self.update_submodules(dest)
def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
# First fetch changes from the default remote
if self.get_git_version() >= (1, 9):
# fetch tags in addition to everything else
self.run_command(["fetch", "-q", "--tags"], cwd=dest)
else:
self.run_command(["fetch", "-q"], cwd=dest)
# Then reset to wanted revision (maybe even origin/master)
rev_options = self.resolve_revision(dest, url, rev_options)
cmd_args = make_command("reset", "--hard", "-q", rev_options.to_args())
self.run_command(cmd_args, cwd=dest)
#: update submodules
self.update_submodules(dest)
@classmethod
def get_remote_url(cls, location: str) -> str:
"""
Return URL of the first remote encountered.
Raises RemoteNotFoundError if the repository does not have a remote
url configured.
"""
# We need to pass 1 for extra_ok_returncodes since the command
# exits with return code 1 if there are no matching lines.
stdout = cls.run_command(
["config", "--get-regexp", r"remote\..*\.url"],
extra_ok_returncodes=(1,),
show_stdout=False,
stdout_only=True,
cwd=location,
)
remotes = stdout.splitlines()
try:
found_remote = remotes[0]
except IndexError:
raise RemoteNotFoundError
for remote in remotes:
if remote.startswith("remote.origin.url "):
found_remote = remote
break
url = found_remote.split(" ")[1]
return cls._git_remote_to_pip_url(url.strip())
@staticmethod
def _git_remote_to_pip_url(url: str) -> str:
"""
Convert a remote url from what git uses to what pip accepts.
There are 3 legal forms **url** may take:
1. A fully qualified url: ssh://git@example.com/foo/bar.git
2. A local project.git folder: /path/to/bare/repository.git
3. SCP shorthand for form 1: git@example.com:foo/bar.git
Form 1 is output as-is. Form 2 must be converted to URI and form 3 must
be converted to form 1.
See the corresponding test test_git_remote_url_to_pip() for examples of
sample inputs/outputs.
"""
if re.match(r"\w+://", url):
# This is already valid. Pass it through as-is.
return url
if os.path.exists(url):
# A local bare remote (git clone --mirror).
# Needs a file:// prefix.
return pathlib.PurePath(url).as_uri()
scp_match = SCP_REGEX.match(url)
if scp_match:
# Add an ssh:// prefix and replace the ':' with a '/'.
return scp_match.expand(r"ssh://\1\2/\3")
# Otherwise, bail out.
raise RemoteNotValidError(url)
@classmethod
def has_commit(cls, location: str, rev: str) -> bool:
"""
Check if rev is a commit that is available in the local repository.
"""
try:
cls.run_command(
["rev-parse", "-q", "--verify", "sha^" + rev],
cwd=location,
log_failed_cmd=False,
)
except InstallationError:
return False
else:
return True
@classmethod
def get_revision(cls, location: str, rev: Optional[str] = None) -> str:
if rev is None:
rev = "HEAD"
current_rev = cls.run_command(
["rev-parse", rev],
show_stdout=False,
stdout_only=True,
cwd=location,
)
return current_rev.strip()
@classmethod
def get_subdirectory(cls, location: str) -> Optional[str]:
"""
Return the path to Python project root, relative to the repo root.
Return None if the project root is in the repo root.
"""
# find the repo root
git_dir = cls.run_command(
["rev-parse", "--git-dir"],
show_stdout=False,
stdout_only=True,
cwd=location,
).strip()
if not os.path.isabs(git_dir):
git_dir = os.path.join(location, git_dir)
repo_root = os.path.abspath(os.path.join(git_dir, ".."))
return find_path_to_project_root_from_repo_root(location, repo_root)
@classmethod
def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]:
"""
Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
That's required because although they use SSH they sometimes don't
work with an ssh:// scheme (e.g. GitHub). But we need a scheme for
parsing. Hence we remove it again afterwards and return it as a stub.
"""
# Works around an apparent Git bug
# (see https://article.gmane.org/gmane.comp.version-control.git/146500)
scheme, netloc, path, query, fragment = urlsplit(url)
if scheme.endswith("file"):
initial_slashes = path[: -len(path.lstrip("/"))]
newpath = initial_slashes + urllib.request.url2pathname(path).replace(
"\\", "/"
).lstrip("/")
after_plus = scheme.find("+") + 1
url = scheme[:after_plus] + urlunsplit(
(scheme[after_plus:], netloc, newpath, query, fragment),
)
if "://" not in url:
assert "file:" not in url
url = url.replace("git+", "git+ssh://")
url, rev, user_pass = super().get_url_rev_and_auth(url)
url = url.replace("ssh://", "")
else:
url, rev, user_pass = super().get_url_rev_and_auth(url)
return url, rev, user_pass
@classmethod
def update_submodules(cls, location: str) -> None:
if not os.path.exists(os.path.join(location, ".gitmodules")):
return
cls.run_command(
["submodule", "update", "--init", "--recursive", "-q"],
cwd=location,
)
@classmethod
def get_repository_root(cls, location: str) -> Optional[str]:
loc = super().get_repository_root(location)
if loc:
return loc
try:
r = cls.run_command(
["rev-parse", "--show-toplevel"],
cwd=location,
show_stdout=False,
stdout_only=True,
on_returncode="raise",
log_failed_cmd=False,
)
except BadCommand:
logger.debug(
"could not determine if %s is under git control "
"because git is not available",
location,
)
return None
except InstallationError:
return None
return os.path.normpath(r.rstrip("\r\n"))
@staticmethod
def should_add_vcs_url_prefix(repo_url: str) -> bool:
"""In either https or ssh form, requirements must be prefixed with git+."""
return True
vcs.register(Git)
|
Git
|
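The _git_remote_to_pip_url helper above normalizes the three remote forms its docstring lists. Here is a minimal standalone sketch of the same idea; the regex below is an illustrative stand-in for pip's actual SCP_REGEX, and the bare-path check is simplified from pip's os.path.exists test:

import pathlib
import re

# Illustrative stand-in for pip's SCP_REGEX: optional user, a host, then a
# server-side path that must start with a word character.
SCP_RE = re.compile(r"^(\w+@)?([^/:]+):(\w[^:]*)$")

def remote_to_pip_url(url: str) -> str:
    if re.match(r"\w+://", url):
        # Form 1: already a fully qualified URL.
        return url
    if url.startswith("/"):
        # Form 2: a local bare repository path (pip checks os.path.exists).
        return pathlib.PurePosixPath(url).as_uri()
    scp_match = SCP_RE.match(url)
    if scp_match:
        # Form 3: SCP shorthand; add ssh:// and turn the ':' into '/'.
        return scp_match.expand(r"ssh://\1\2/\3")
    raise ValueError(f"not a recognized git remote: {url}")

assert remote_to_pip_url("git@example.com:foo/bar.git") == "ssh://git@example.com/foo/bar.git"
assert remote_to_pip_url("/srv/repos/bare.git") == "file:///srv/repos/bare.git"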
python
|
sanic-org__sanic
|
sanic/worker/multiplexer.py
|
{
"start": 253,
"end": 5467
}
|
class ____:
"""Multiplexer for Sanic workers.
This is instantiated inside of worker processes only. It is used to
communicate with the monitor process.
Args:
monitor_publisher (Connection): The connection to the monitor.
worker_state (Dict[str, Any]): The state of the worker.
"""
def __init__(
self,
monitor_publisher: Connection,
worker_state: dict[str, Any],
):
self._monitor_publisher = monitor_publisher
self._state = WorkerState(worker_state, self.name)
def ack(self):
"""Acknowledge the worker is ready."""
logger.debug(
f"{Colors.BLUE}Process ack: {Colors.BOLD}{Colors.SANIC}"
f"%s {Colors.BLUE}[%s]{Colors.END}",
self.name,
self.pid,
)
self._state._state[self.name] = {
**self._state._state[self.name],
"state": ProcessState.ACKED.name,
}
def manage(
self,
ident: str,
func: Callable[..., Any],
kwargs: dict[str, Any],
transient: bool = False,
restartable: Optional[bool] = None,
tracked: bool = False,
auto_start: bool = True,
workers: int = 1,
) -> None:
"""Manages the initiation and monitoring of a worker process.
Args:
ident (str): A unique identifier for the worker process.
func (Callable[..., Any]): The function to be executed in the worker process.
kwargs (Dict[str, Any]): A dictionary of arguments to be passed to `func`.
transient (bool, optional): Flag to mark the process as transient. If `True`,
the Worker Manager will restart the process with any global restart
(e.g., auto-reload). Defaults to `False`.
restartable (Optional[bool], optional): Flag to mark the process as restartable. If `True`,
the Worker Manager can restart the process if prompted. Defaults to `None`.
tracked (bool, optional): Flag to indicate whether the process should be tracked
after its completion. Defaults to `False`.
auto_start (bool, optional): Flag to indicate whether the process should be started automatically. Defaults to `True`.
workers (int, optional): The number of worker processes to run. Defaults to 1.
This method packages the provided arguments into a bundle and sends them back to the
main process to be managed by the Worker Manager.
""" # noqa: E501
bundle = (
ident,
func,
kwargs,
transient,
restartable,
tracked,
auto_start,
workers,
)
self._monitor_publisher.send(bundle)
def set_serving(self, serving: bool) -> None:
"""Set the worker to serving.
Args:
serving (bool): Whether the worker is serving.
"""
self._state._state[self.name] = {
**self._state._state[self.name],
"serving": serving,
}
def exit(self):
"""Run cleanup at worker exit."""
try:
del self._state._state[self.name]
except ConnectionRefusedError:
logger.debug("Monitor process has already exited.")
def restart(
self,
name: str = "",
all_workers: bool = False,
zero_downtime: bool = False,
):
"""Restart the worker.
Args:
name (str): The name of the process to restart.
all_workers (bool): Whether to restart all workers.
zero_downtime (bool): Whether to restart with zero downtime.
"""
if name and all_workers:
raise ValueError(
"Ambiguous restart with both a named process and"
" all_workers=True"
)
if not name:
name = "__ALL_PROCESSES__:" if all_workers else self.name
if not name.endswith(":"):
name += ":"
if zero_downtime:
name += ":STARTUP_FIRST"
self._monitor_publisher.send(name)
reload = restart # no cov
"""Alias for restart."""
def scale(self, num_workers: int):
"""Scale the number of workers.
Args:
num_workers (int): The number of workers to scale to.
"""
message = f"__SCALE__:{num_workers}"
self._monitor_publisher.send(message)
def terminate(self, early: bool = False):
"""Terminate the worker.
Args:
early (bool): Whether to terminate early.
"""
message = "__TERMINATE_EARLY__" if early else "__TERMINATE__"
self._monitor_publisher.send(message)
@property
def pid(self) -> int:
"""The process ID of the worker."""
return getpid()
@property
def name(self) -> str:
"""The name of the worker."""
return environ.get("SANIC_WORKER_NAME", "")
@property
def state(self):
"""The state of the worker."""
return self._state
@property
def workers(self) -> dict[str, Any]:
"""The state of all workers."""
return self.state.full()
|
WorkerMultiplexer
|
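The restart method in the multiplexer above encodes its target as a plain string before sending it over the monitor pipe. This is a small sketch of just that string protocol, assuming the monitor parses the same markers; restart_message is a hypothetical helper, not part of Sanic:

def restart_message(current: str, name: str = "", all_workers: bool = False,
                    zero_downtime: bool = False) -> str:
    # Mirrors WorkerMultiplexer.restart: pick a target, ensure a trailing
    # ":", then append the zero-downtime marker if requested.
    if name and all_workers:
        raise ValueError(
            "Ambiguous restart with both a named process and all_workers=True"
        )
    if not name:
        name = "__ALL_PROCESSES__:" if all_workers else current
    if not name.endswith(":"):
        name += ":"
    if zero_downtime:
        name += ":STARTUP_FIRST"
    return name

assert restart_message("Sanic-Server-0-0") == "Sanic-Server-0-0:"
assert restart_message("Sanic-Server-0-0", zero_downtime=True) == "Sanic-Server-0-0::STARTUP_FIRST"
assert restart_message("Sanic-Server-0-0", all_workers=True) == "__ALL_PROCESSES__:"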
python
|
getsentry__sentry
|
src/sentry/dynamic_sampling/rules/biases/boost_latest_releases_bias.py
|
{
"start": 454,
"end": 2808
}
|
class ____(Bias):
datetime_format = "%Y-%m-%dT%H:%M:%SZ"
def generate_rules(self, project: Project, base_sample_rate: float) -> list[PolymorphicRule]:
factor = apply_dynamic_factor(base_sample_rate, LATEST_RELEASES_BOOST_FACTOR)
boosted_releases = ProjectBoostedReleases(project.id).get_extended_boosted_releases()
return cast(
list[PolymorphicRule],
[
{
"samplingValue": {
"type": "factor",
"value": factor,
},
"type": "trace",
"condition": {
"op": "and",
"inner": [
{
"op": "eq",
"name": "trace.release",
"value": [boosted_release.version],
},
{
"op": "eq",
"name": "trace.environment",
# When environment is None, it will be mapped to equivalent null in json.
# When Relay receives a rule with "value": null it will match it against events without
# the environment tag set.
"value": boosted_release.environment,
},
],
},
"id": RESERVED_IDS[RuleType.BOOST_LATEST_RELEASES_RULE] + idx,
"timeRange": {
"start": datetime.fromtimestamp(boosted_release.timestamp).strftime(
self.datetime_format
),
"end": datetime.fromtimestamp(
boosted_release.timestamp + boosted_release.platform.time_to_adoption
).strftime(self.datetime_format),
},
"decayingFn": {
"type": "linear",
"decayedValue": LATEST_RELEASES_BOOST_DECAYED_FACTOR,
},
}
for idx, boosted_release in enumerate(boosted_releases)
],
)
|
BoostLatestReleasesBias
|
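Each rule above gets a timeRange running from the release's boost timestamp to that timestamp plus the platform's time-to-adoption, formatted with datetime_format. A worked sketch of that window computation follows; the helper name and the one-hour adoption value are made up for illustration:

from datetime import datetime

DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"

def boost_window(timestamp: float, time_to_adoption: float) -> dict:
    # Mirrors the "timeRange" construction in generate_rules above.
    return {
        "start": datetime.fromtimestamp(timestamp).strftime(DATETIME_FORMAT),
        "end": datetime.fromtimestamp(timestamp + time_to_adoption).strftime(DATETIME_FORMAT),
    }

# Hypothetical release boosted at epoch 1704067200 with a one-hour adoption window.
print(boost_window(1704067200.0, 3600.0))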
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/autoVariance3.py
|
{
"start": 3804,
"end": 3961
}
|
class ____[T]:
x: T
# This should generate an error based on variance.
vinv4_1: ShouldBeInvariant4[float] = ShouldBeInvariant4[int](1)
|
ShouldBeInvariant4
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typeVarDefault5.py
|
{
"start": 355,
"end": 560
}
|
class ____: ...
@overload
def func1(x: ClassA) -> ClassA: ...
@overload
def func1[T1 = str](x: ClassC | T1) -> T1: ...
def func1(x: Any) -> Any: ...
reveal_type(func1(ClassC()), expected_text="str")
|
ClassC
|
python
|
getsentry__sentry
|
tests/sentry/workflow_engine/endpoints/validators/actions/test_ticketing.py
|
{
"start": 176,
"end": 993
}
|
class ____(TestCase):
__test__ = False
provider: str
def setUp(self) -> None:
super().setUp()
self.integration, self.org_integration = self.create_provider_integration_for(
provider=self.provider,
organization=self.organization,
user=self.user,
name=self.provider,
)
self.valid_data = {
"type": Action.Type(self.provider),
"config": {},
"data": {},
"integrationId": self.integration.id,
}
def test_validate(self) -> None:
validator = BaseActionValidator(
data=self.valid_data,
context={"organization": self.organization},
)
result = validator.is_valid()
assert result is True
|
BaseTicketingActionValidatorTest
|
python
|
getsentry__sentry
|
tests/sentry/hybridcloud/test_tombstone.py
|
{
"start": 301,
"end": 1486
}
|
class ____(TransactionTestCase):
def test_writing_control_models(self) -> None:
with assume_test_silo_mode(SiloMode.REGION):
assert RegionTombstone.objects.count() == 0
user_id = self.user.id
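# Touch the lazy organization fixture so it exists before the user is deleted.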
self.organization
with outbox_runner(), assume_test_silo_mode(SiloMode.CONTROL):
self.user.delete()
with assume_test_silo_mode(SiloMode.REGION):
assert RegionTombstone.objects.count() == 1
assert RegionTombstone.objects.filter(
table_name="auth_user", object_identifier=user_id
).exists()
def test_writing_region_models(self) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
assert ControlTombstone.objects.count() == 0
org_id = self.organization.id
with outbox_runner(), assume_test_silo_mode(SiloMode.REGION):
self.organization.delete()
with assume_test_silo_mode(SiloMode.CONTROL):
assert ControlTombstone.objects.count() == 1
assert ControlTombstone.objects.filter(
table_name="sentry_organization", object_identifier=org_id
).exists()
|
TombstoneTest
|
python
|
apache__airflow
|
airflow-ctl/src/airflowctl/api/datamodels/generated.py
|
{
"start": 1899,
"end": 2087
}
|
class ____(str, Enum):
"""
Bulk Action to be taken if the entity already exists or not.
"""
FAIL = "fail"
SKIP = "skip"
OVERWRITE = "overwrite"
|
BulkActionOnExistence
|
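Because the enum above also subclasses str, its members compare equal to their raw string values, which is what lets serialized payloads round-trip cleanly. A tiny self-contained illustration (the class is re-declared here rather than imported):

from enum import Enum

class BulkActionOnExistence(str, Enum):
    FAIL = "fail"
    SKIP = "skip"
    OVERWRITE = "overwrite"

assert BulkActionOnExistence("skip") is BulkActionOnExistence.SKIP  # parse from a payload
assert BulkActionOnExistence.FAIL == "fail"                         # plain str comparison works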
python
|
huggingface__transformers
|
src/transformers/cli/serve.py
|
{
"start": 9789,
"end": 11574
}
|
class ____:
"""
A class that holds a PreTrainedModel instance and its associated processor.
Automatically deletes the instances after a specified timeout.
"""
def __init__(
self,
model: "PreTrainedModel",
timeout_seconds: int,
processor: Union["ProcessorMixin", "PreTrainedTokenizerFast"] | None = None,
):
self.model = model
self._name_or_path = str(model.name_or_path)
self.processor = processor
self.timeout_seconds = timeout_seconds
self._timer = threading.Timer(self.timeout_seconds, self.timeout_reached)
self._timer.start()
def reset_timer(self):
"""Reset the timer for the deletion of the instances."""
self._timer.cancel()
self._timer = threading.Timer(self.timeout_seconds, self.timeout_reached)
self._timer.start()
def delete_model(self):
"""Delete the wrapped model and processor and clean up resources."""
if hasattr(self, "model") and self.model is not None:
del self.model
del self.processor
self.model = None
self.processor = None
gc.collect()
# Clear CUDA cache if available
reset_torch_cache()
# XXX: in case we manually delete the model, like on server shutdown
self._timer.cancel()
def timeout_reached(self):
if self.timeout_seconds > 0:
self.delete_model()
logger.info(
f"{self._name_or_path} was removed from memory after {self.timeout_seconds} seconds of inactivity"
)
def is_deleted(self):
"""Check if the instances have been deleted."""
return not hasattr(self, "model") or self.model is None
|
TimedModel
|
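The wrapper above keeps a model alive only while it is in use: every access resets a threading.Timer, and the timer callback frees the model. Below is a minimal generic version of the same idle-eviction pattern, with a plain callback standing in for the model cleanup; all names here are illustrative:

import threading

class IdleEvictor:
    """Call `on_timeout` once `timeout_seconds` pass without a reset."""

    def __init__(self, timeout_seconds: float, on_timeout) -> None:
        self.timeout_seconds = timeout_seconds
        self.on_timeout = on_timeout
        self._timer = threading.Timer(timeout_seconds, on_timeout)
        self._timer.start()

    def reset(self) -> None:
        # Cancel the pending timer and arm a fresh one, exactly like
        # TimedModel.reset_timer above.
        self._timer.cancel()
        self._timer = threading.Timer(self.timeout_seconds, self.on_timeout)
        self._timer.start()

    def cancel(self) -> None:
        self._timer.cancel()

evictor = IdleEvictor(300.0, lambda: print("evicting idle model"))
evictor.reset()   # call on every request to keep the resource alive
evictor.cancel()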
python
|
pytransitions__transitions
|
transitions/extensions/states.py
|
{
"start": 382,
"end": 1012
}
|
class ____(State):
"""Allows states to be tagged.
Attributes:
tags (list): A list of tag strings. `State.is_<tag>` may be used
to check if <tag> is in the list.
"""
def __init__(self, *args, **kwargs):
"""
Args:
**kwargs: If kwargs contains `tags`, assign them to the attribute.
"""
self.tags = kwargs.pop('tags', [])
super(Tags, self).__init__(*args, **kwargs)
def __getattr__(self, item):
if item.startswith('is_'):
return item[3:] in self.tags
return super(Tags, self).__getattribute__(item)
|
Tags
|
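The __getattr__ hook above turns any is_<tag> attribute access into a membership test against self.tags. A short sketch of the same trick on a bare stand-in class, without the transitions machinery:

class TaggedThing:
    def __init__(self, tags=None):
        self.tags = tags or []

    def __getattr__(self, item):
        # Only invoked for attributes not found by normal lookup, so
        # `self.tags` itself never reaches this hook.
        if item.startswith("is_"):
            return item[3:] in self.tags
        raise AttributeError(item)

s = TaggedThing(tags=["accepted", "final"])
assert s.is_accepted is True
assert s.is_rejected is False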
python
|
Netflix__metaflow
|
metaflow/plugins/cards/card_modules/basic.py
|
{
"start": 20966,
"end": 21751
}
|
class ____(MetaflowCard):
type = "default_json"
def __init__(
self,
options=dict(only_repr=True),
components=[],
graph=None,
flow=None,
**kwargs
):
self._only_repr = True
self._graph = None if graph is None else transform_flow_graph(graph)
self._flow = flow
if "only_repr" in options:
self._only_repr = options["only_repr"]
self._components = components
def render(self, task):
final_component_dict = TaskInfoComponent(
task,
only_repr=self._only_repr,
graph=self._graph,
components=self._components,
flow=self._flow,
).render()
return json.dumps(final_component_dict)
|
DefaultCardJSON
|
python
|
PyCQA__pylint
|
pylint/pyreverse/inspector.py
|
{
"start": 10831,
"end": 11161
}
|
class ____(ABC):
@abstractmethod
def set_next(
self, handler: RelationshipHandlerInterface
) -> RelationshipHandlerInterface:
pass
@abstractmethod
def handle(
self, node: nodes.AssignAttr | nodes.AssignName, parent: nodes.ClassDef
) -> None:
pass
|
RelationshipHandlerInterface
|
python
|
Textualize__textual
|
tests/tree/test_tree_clearing.py
|
{
"start": 370,
"end": 3558
}
|
class ____(App[None]):
"""Tree clearing test app."""
def compose(self) -> ComposeResult:
yield VerseTree("White Sun", data=VerseStar())
def on_mount(self) -> None:
tree = self.query_one(VerseTree)
node = tree.root.add("Londinium", VersePlanet())
node.add_leaf("Balkerne", VerseMoon())
node.add_leaf("Colchester", VerseMoon())
node = tree.root.add("Sihnon", VersePlanet())
node.add_leaf("Airen", VerseMoon())
node.add_leaf("Xiaojie", VerseMoon())
async def test_tree_simple_clear() -> None:
"""Clearing a tree should keep the old root label and data."""
async with TreeClearApp().run_test() as pilot:
tree = pilot.app.query_one(VerseTree)
assert len(tree.root.children) > 1
pilot.app.query_one(VerseTree).clear()
assert len(tree.root.children) == 0
assert str(tree.root.label) == "White Sun"
assert isinstance(tree.root.data, VerseStar)
async def test_tree_reset_with_label() -> None:
"""Resetting a tree with a new label should use the new label and set the data to None."""
async with TreeClearApp().run_test() as pilot:
tree = pilot.app.query_one(VerseTree)
assert len(tree.root.children) > 1
pilot.app.query_one(VerseTree).reset(label="Jiangyin")
assert len(tree.root.children) == 0
assert str(tree.root.label) == "Jiangyin"
assert tree.root.data is None
async def test_tree_reset_with_label_and_data() -> None:
"""Resetting a tree with a label and data have that label and data used."""
async with TreeClearApp().run_test() as pilot:
tree = pilot.app.query_one(VerseTree)
assert len(tree.root.children) > 1
pilot.app.query_one(VerseTree).reset(label="Jiangyin", data=VersePlanet())
assert len(tree.root.children) == 0
assert str(tree.root.label) == "Jiangyin"
assert isinstance(tree.root.data, VersePlanet)
async def test_remove_node():
async with TreeClearApp().run_test() as pilot:
tree = pilot.app.query_one(VerseTree)
assert len(tree.root.children) == 2
tree.root.children[0].remove()
assert len(tree.root.children) == 1
async def test_remove_node_children():
async with TreeClearApp().run_test() as pilot:
tree = pilot.app.query_one(VerseTree)
assert len(tree.root.children) == 2
assert len(tree.root.children[0].children) == 2
tree.root.children[0].remove_children()
assert len(tree.root.children) == 2
assert len(tree.root.children[0].children) == 0
async def test_tree_remove_children_of_root():
"""Test removing the children of the root."""
async with TreeClearApp().run_test() as pilot:
tree = pilot.app.query_one(VerseTree)
assert len(tree.root.children) > 1
tree.root.remove_children()
assert len(tree.root.children) == 0
async def test_attempt_to_remove_root():
"""Attempting to remove the root should be an error."""
async with TreeClearApp().run_test() as pilot:
with pytest.raises(RemoveRootError):
pilot.app.query_one(VerseTree).root.remove()
|
TreeClearApp
|
python
|
pytorch__pytorch
|
test/distributed/elastic/agent/server/test/api_test.py
|
{
"start": 1240,
"end": 1573
}
|
class ____(unittest.TestCase):
def test_is_running(self):
for state in WorkerState:
if state == WorkerState.HEALTHY or state == WorkerState.UNHEALTHY:
self.assertTrue(WorkerState.is_running(state))
else:
self.assertFalse(WorkerState.is_running(state))
|
WorkerStateTest
|
python
|
urllib3__urllib3
|
test/test_proxymanager.py
|
{
"start": 295,
"end": 3657
}
|
class ____:
@pytest.mark.parametrize("proxy_scheme", ["http", "https"])
def test_proxy_headers(self, proxy_scheme: str) -> None:
url = "http://pypi.org/project/urllib3/"
proxy_url = f"{proxy_scheme}://something:1234"
with ProxyManager(proxy_url) as p:
# Verify default headers
default_headers = {"Accept": "*/*", "Host": "pypi.org"}
headers = p._set_proxy_headers(url)
assert headers == default_headers
# Verify default headers don't overwrite provided headers
provided_headers = {
"Accept": "application/json",
"custom": "header",
"Host": "test.python.org",
}
headers = p._set_proxy_headers(url, provided_headers)
assert headers == provided_headers
# Verify proxy with nonstandard port
provided_headers = {"Accept": "application/json"}
expected_headers = provided_headers.copy()
expected_headers.update({"Host": "pypi.org:8080"})
url_with_port = "http://pypi.org:8080/project/urllib3/"
headers = p._set_proxy_headers(url_with_port, provided_headers)
assert headers == expected_headers
def test_default_port(self) -> None:
with ProxyManager("http://something") as p:
assert p.proxy is not None
assert p.proxy.port == 80
with ProxyManager("https://something") as p:
assert p.proxy is not None
assert p.proxy.port == 443
def test_invalid_scheme(self) -> None:
with pytest.raises(AssertionError):
ProxyManager("invalid://host/p")
with pytest.raises(ValueError):
ProxyManager("invalid://host/p")
def test_proxy_tunnel(self) -> None:
http_url = parse_url("http://example.com")
https_url = parse_url("https://example.com")
with ProxyManager("http://proxy:8080") as p:
assert p._proxy_requires_url_absolute_form(http_url)
assert p._proxy_requires_url_absolute_form(https_url) is False
with ProxyManager("https://proxy:8080") as p:
assert p._proxy_requires_url_absolute_form(http_url)
assert p._proxy_requires_url_absolute_form(https_url) is False
with ProxyManager("https://proxy:8080", use_forwarding_for_https=True) as p:
assert p._proxy_requires_url_absolute_form(http_url)
assert p._proxy_requires_url_absolute_form(https_url)
def test_proxy_connect_retry(self) -> None:
retry = Retry(total=None, connect=False)
port = find_unused_port()
with ProxyManager(f"http://localhost:{port}") as p:
with pytest.raises(ProxyError) as ei:
p.urlopen("HEAD", url="http://localhost/", retries=retry)
assert isinstance(ei.value.original_error, NewConnectionError)
retry = Retry(total=None, connect=2)
with ProxyManager(f"http://localhost:{port}") as p:
with pytest.raises(MaxRetryError) as ei1:
p.urlopen("HEAD", url="http://localhost/", retries=retry)
assert ei1.value.reason is not None
assert isinstance(ei1.value.reason, ProxyError)
assert isinstance(ei1.value.reason.original_error, NewConnectionError)
|
TestProxyManager
|
python
|
sphinx-doc__sphinx
|
sphinx/domains/c/_ast.py
|
{
"start": 61148,
"end": 65877
}
|
class ____(ASTBaseBase):
def __init__(
self,
objectType: str,
directiveType: str | None,
declaration: DeclarationType | ASTFunctionParameter,
semicolon: bool = False,
) -> None:
self.objectType = objectType
self.directiveType = directiveType
self.declaration = declaration
self.semicolon = semicolon
self.symbol: Symbol | None = None
# set by CObject._add_enumerator_to_parent
self.enumeratorScopedSymbol: Symbol | None = None
# the cache assumes that by the time get_newest_id is called, no
# further changes will be made to this object
self._newest_id_cache: str | None = None
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTDeclaration):
return NotImplemented
return (
self.objectType == other.objectType
and self.directiveType == other.directiveType
and self.declaration == other.declaration
and self.semicolon == other.semicolon
and self.symbol == other.symbol
and self.enumeratorScopedSymbol == other.enumeratorScopedSymbol
)
def __hash__(self) -> int:
return hash((
self.objectType,
self.directiveType,
self.declaration,
self.semicolon,
self.symbol,
self.enumeratorScopedSymbol,
))
def clone(self) -> ASTDeclaration:
return ASTDeclaration(
self.objectType,
self.directiveType,
self.declaration.clone(),
self.semicolon,
)
@property
def name(self) -> ASTNestedName:
decl = cast('DeclarationType', self.declaration)
return decl.name
@property
def function_params(self) -> list[ASTFunctionParameter] | None:
if self.objectType != 'function':
return None
decl = cast('ASTType', self.declaration)
return decl.function_params
def get_id(self, version: int, prefixed: bool = True) -> str:
if self.objectType == 'enumerator' and self.enumeratorScopedSymbol:
return self.enumeratorScopedSymbol.declaration.get_id(version, prefixed)
id_ = self.declaration.get_id(version, self.objectType, self.symbol)
if prefixed:
return _id_prefix[version] + id_
else:
return id_
def get_newest_id(self) -> str:
if self._newest_id_cache is None:
self._newest_id_cache = self.get_id(_max_id, True)
return self._newest_id_cache
def _stringify(self, transform: StringifyTransform) -> str:
res = transform(self.declaration)
if self.semicolon:
res += ';'
return res
def describe_signature(
self,
signode: TextElement,
mode: str,
env: BuildEnvironment,
options: dict[str, bool],
) -> None:
verify_description_mode(mode)
assert self.symbol
# The caller of the domain added a desc_signature node.
# Always enable multiline:
signode['is_multiline'] = True
# Put each line in a desc_signature_line node.
main_decl_node = addnodes.desc_signature_line()
main_decl_node.sphinx_line_type = 'declarator'
main_decl_node['add_permalink'] = not self.symbol.isRedeclaration
signode += main_decl_node
if self.objectType in {'member', 'function', 'macro'}:
pass
elif self.objectType == 'struct':
main_decl_node += addnodes.desc_sig_keyword('struct', 'struct')
main_decl_node += addnodes.desc_sig_space()
elif self.objectType == 'union':
main_decl_node += addnodes.desc_sig_keyword('union', 'union')
main_decl_node += addnodes.desc_sig_space()
elif self.objectType == 'enum':
main_decl_node += addnodes.desc_sig_keyword('enum', 'enum')
main_decl_node += addnodes.desc_sig_space()
elif self.objectType == 'enumerator':
main_decl_node += addnodes.desc_sig_keyword('enumerator', 'enumerator')
main_decl_node += addnodes.desc_sig_space()
elif self.objectType == 'type':
decl = cast('ASTType', self.declaration)
prefix = decl.get_type_declaration_prefix()
main_decl_node += addnodes.desc_sig_keyword(prefix, prefix)
main_decl_node += addnodes.desc_sig_space()
else:
raise AssertionError
self.declaration.describe_signature(main_decl_node, mode, env, self.symbol)
if self.semicolon:
main_decl_node += addnodes.desc_sig_punctuation(';', ';')
|
ASTDeclaration
|
python
|
kubernetes-client__python
|
kubernetes/client/models/core_v1_endpoint_port.py
|
{
"start": 383,
"end": 7954
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'app_protocol': 'str',
'name': 'str',
'port': 'int',
'protocol': 'str'
}
attribute_map = {
'app_protocol': 'appProtocol',
'name': 'name',
'port': 'port',
'protocol': 'protocol'
}
def __init__(self, app_protocol=None, name=None, port=None, protocol=None, local_vars_configuration=None): # noqa: E501
"""CoreV1EndpointPort - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._app_protocol = None
self._name = None
self._port = None
self._protocol = None
self.discriminator = None
if app_protocol is not None:
self.app_protocol = app_protocol
if name is not None:
self.name = name
self.port = port
if protocol is not None:
self.protocol = protocol
@property
def app_protocol(self):
"""Gets the app_protocol of this CoreV1EndpointPort. # noqa: E501
The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either: * Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). * Kubernetes-defined prefixed names: * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 * Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol. # noqa: E501
:return: The app_protocol of this CoreV1EndpointPort. # noqa: E501
:rtype: str
"""
return self._app_protocol
@app_protocol.setter
def app_protocol(self, app_protocol):
"""Sets the app_protocol of this CoreV1EndpointPort.
The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either: * Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). * Kubernetes-defined prefixed names: * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 * Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol. # noqa: E501
:param app_protocol: The app_protocol of this CoreV1EndpointPort. # noqa: E501
:type: str
"""
self._app_protocol = app_protocol
@property
def name(self):
"""Gets the name of this CoreV1EndpointPort. # noqa: E501
The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined. # noqa: E501
:return: The name of this CoreV1EndpointPort. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this CoreV1EndpointPort.
The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined. # noqa: E501
:param name: The name of this CoreV1EndpointPort. # noqa: E501
:type: str
"""
self._name = name
@property
def port(self):
"""Gets the port of this CoreV1EndpointPort. # noqa: E501
The port number of the endpoint. # noqa: E501
:return: The port of this CoreV1EndpointPort. # noqa: E501
:rtype: int
"""
return self._port
@port.setter
def port(self, port):
"""Sets the port of this CoreV1EndpointPort.
The port number of the endpoint. # noqa: E501
:param port: The port of this CoreV1EndpointPort. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and port is None: # noqa: E501
raise ValueError("Invalid value for `port`, must not be `None`") # noqa: E501
self._port = port
@property
def protocol(self):
"""Gets the protocol of this CoreV1EndpointPort. # noqa: E501
The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP. # noqa: E501
:return: The protocol of this CoreV1EndpointPort. # noqa: E501
:rtype: str
"""
return self._protocol
@protocol.setter
def protocol(self, protocol):
"""Sets the protocol of this CoreV1EndpointPort.
The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP. # noqa: E501
:param protocol: The protocol of this CoreV1EndpointPort. # noqa: E501
:type: str
"""
self._protocol = protocol
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CoreV1EndpointPort):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, CoreV1EndpointPort):
return True
return self.to_dict() != other.to_dict()
|
CoreV1EndpointPort
|
python
|
python__mypy
|
mypy/plugin.py
|
{
"start": 7011,
"end": 8476
}
|
class ____:
"""Interface for accessing semantic analyzer functionality in plugins.
Methods docstrings contain only basic info. Look for corresponding implementation
docstrings in typeanal.py for more details.
"""
# An options object. Note: these are the cloned options for the current file.
# This might be different from Plugin.options (that contains default/global options)
# if there are per-file options in the config. This applies to all other interfaces
# in this file.
options: Options
@abstractmethod
def fail(self, msg: str, ctx: Context, *, code: ErrorCode | None = None) -> None:
"""Emit an error message at given location."""
raise NotImplementedError
@abstractmethod
def named_type(self, fullname: str, args: list[Type], /) -> Instance:
"""Construct an instance of a builtin type with given name."""
raise NotImplementedError
@abstractmethod
def analyze_type(self, typ: Type, /) -> Type:
"""Analyze an unbound type using the default mypy logic."""
raise NotImplementedError
@abstractmethod
def analyze_callable_args(
self, arglist: TypeList
) -> tuple[list[Type], list[ArgKind], list[str | None]] | None:
"""Find types, kinds, and names of arguments from extended callable syntax."""
raise NotImplementedError
# A context for a hook that semantically analyzes an unbound type.
|
TypeAnalyzerPluginInterface
|
python
|
SmileyChris__easy-thumbnails
|
easy_thumbnails/tests/test_models.py
|
{
"start": 174,
"end": 2124
}
|
class ____(test.BaseTest):
"""Test for FileManager"""
def setUp(self):
super().setUp()
self.storage = test.TemporaryStorage()
self.storage_hash = utils.get_storage_hash(self.storage)
self.source = Source.objects.create(
name='Test source',
storage_hash=self.storage_hash)
# Generate a test image, save it.
self.filename = self.create_image(self.storage, 'test.jpg')
def tearDown(self):
self.storage.delete_temporary_storage()
super().tearDown()
def test_create_file(self):
"""Create a new Thumbnail in the database"""
img = Thumbnail.objects.get_file(
self.storage,
self.filename,
create=True,
source=self.source)
self.assertEqual(img.name, self.filename)
def test_get_file(self):
"""Fetch an existing thumb from database"""
created = Thumbnail.objects.create(
storage_hash=self.storage_hash,
name=self.filename,
source=self.source)
fetched = Thumbnail.objects.get_file(
self.storage,
self.filename,
create=False)
self.assertTrue(fetched)
self.assertEqual(created, fetched)
def test_get_file_check_cache(self):
"""Fetch a thumb that is in the storage but not in the database"""
# It's not in the database yet
try:
Thumbnail.objects.get(name=self.filename)
self.fail('Thumb should not exist yet')
except Thumbnail.DoesNotExist:
pass
Thumbnail.objects.get_file(
self.storage,
self.filename,
source=self.source,
check_cache_miss=True)
# Now it is
try:
Thumbnail.objects.get(name=self.filename)
except Thumbnail.DoesNotExist:
self.fail('Thumb should exist now')
|
FileManagerTest
|
python
|
google__jax
|
jax/_src/sharding_impls.py
|
{
"start": 6379,
"end": 12330
}
|
class ____(jsharding.Sharding):
"""Describes a sharding used by :func:`jax.pmap`."""
devices: np.ndarray
sharding_spec: sharding_specs.ShardingSpec
_internal_device_list: xc.DeviceList
@use_cpp_method()
def __init__(self, devices: Sequence[Device] | np.ndarray,
sharding_spec: sharding_specs.ShardingSpec):
self.devices = np.asarray(devices)
# The sharding spec should be pmap's sharding spec.
self.sharding_spec = sharding_spec
def __reduce__(self):
return (type(self), (self.devices, self.sharding_spec))
def __eq__(self, other):
if not isinstance(other, PmapSharding):
return False
if self is other:
return True
return (self.sharding_spec == other.sharding_spec and
self.devices.shape == other.devices.shape and
self._internal_device_list == other._internal_device_list)
def __hash__(self):
if not hasattr(self, '_hash'):
self._hash = hash((self._internal_device_list, self.sharding_spec))
return self._hash
def __str__(self):
device_ids = [d.id for d in self.devices.flat]
return (f'PmapSharding(sharding_spec={self.sharding_spec}, '
f'{device_ids=}, '
f'device_platform={self.devices.flat[0].platform.upper()}, '
f'device_shape={self.devices.shape})')
def __repr__(self):
return (f'PmapSharding(sharding_spec={self.sharding_spec}, '
f'devices={self.devices})')
def is_equivalent_to(self: PmapSharding, other: PmapSharding, # type: ignore
ndim: int) -> bool:
return self == other
# TODO(yashkatariya): Expose `sharded_dim_size` in the API if required.
@classmethod
def default(cls, shape: Shape, sharded_dim: int | None = 0,
devices: Sequence[xc.Device] | None = None) -> PmapSharding:
"""Creates a :class:`PmapSharding` which matches the default placement
used by :func:`jax.pmap`.
Args:
shape: The shape of the input array.
sharded_dim: Dimension the input array is sharded on. Defaults to 0.
devices: Optional sequence of devices to use. If omitted, the implicit
device order used by pmap is used, which is the order of
:func:`jax.local_devices`.
"""
if sharded_dim is None:
if devices is None:
raise ValueError("One of sharded_dim or devices must be set.")
nrep = len(devices)
return cls(np.array(devices),
sharding_specs.pmap_sharding_spec(nrep, nrep, shape, None))
# The dtype doesn't matter here. Its only used for creating the
# sharding_spec.
sharding_spec = sharding_specs.create_pmap_sharding_spec(
tuple(shape), sharded_dim)
num_ways_sharded = None
for s in sharding_spec.sharding:
if isinstance(s, sharding_specs.Unstacked):
assert num_ways_sharded is None
num_ways_sharded = s.size
elif isinstance(s, sharding_specs.Chunked):
assert num_ways_sharded is None
if len(s.chunks) == 1:
num_ways_sharded = s.chunks[0]
else:
raise NotImplementedError(
'Multiple chunks in Chunked dimension not supported.')
if devices is None:
pmap_devices: np.ndarray = np.array(xb.local_devices()[:num_ways_sharded])
else:
pmap_devices = np.array(devices)
return cls(pmap_devices, sharding_spec)
@property
def num_devices(self) -> int:
return len(self.device_set)
@functools.cached_property
def device_set(self) -> set[Device]:
return set(self.devices.flat)
def devices_indices_map(self, global_shape: Shape) -> Mapping[Device, Index]:
return pmap_sharding_devices_indices_map(self, global_shape)
@functools.cached_property
def _device_assignment(self) -> XLADeviceAssignment:
return tuple(self.devices.flat)
@property
def memory_kind(self) -> str | None:
try:
return self._internal_device_list.default_memory_kind
except:
return None
def with_memory_kind(self, kind: str):
raise NotImplementedError("pmap does not support memories.")
def _to_xla_hlo_sharding(self, num_dimensions: int) -> xc.HloSharding:
raise NotImplementedError("pmap doesn't use OpSharding.")
def _to_sdy_sharding(self, num_dimensions: int) -> SdyArray:
raise NotImplementedError("pmap doesn't use SdyArray.")
@functools.cached_property
def is_fully_replicated(self) -> bool:
for s in self.sharding_spec.sharding:
if isinstance(s, (sharding_specs.Unstacked, sharding_specs.Chunked)):
return False
return True
@functools.cached_property
def is_fully_addressable(self) -> bool:
return self._internal_device_list.is_fully_addressable
def shard_shape(self, global_shape: Shape) -> Shape:
sharded_dim = None
sharded_dim_size = None
for i, s in enumerate(self.sharding_spec.sharding):
if isinstance(s, sharding_specs.Unstacked):
sharded_dim = i
sharded_dim_size = s.size
sharded_shape = util.tuple_delete(global_shape, sharded_dim)
break
elif isinstance(s, sharding_specs.Chunked):
sharded_dim = i
assert len(s.chunks) == 1, s.chunks
sharded_dim_size = s.chunks[0]
sharded_shape = util.tuple_update(global_shape, sharded_dim, 1)
break
if sharded_dim is None:
return global_shape
if global_shape[sharded_dim] != sharded_dim_size:
raise ValueError(
f'The sharded dimension must be equal to the number of '
f'devices passed to PmapSharding. Got sharded dimension {sharded_dim} '
f'with value {global_shape[sharded_dim]} in shape {global_shape} and '
f'the number of devices={len(self._device_assignment)}')
return sharded_shape
PmapSharding.__module__ = 'jax.sharding'
def _unpickle_gspmd_sharding(devices, op_sharding, memory_kind):
return GSPMDSharding(devices, op_sharding, memory_kind=memory_kind)
@use_cpp_class(xc.GSPMDSharding)
|
PmapSharding
|
python
|
mlflow__mlflow
|
mlflow/exceptions.py
|
{
"start": 6279,
"end": 6671
}
|
class ____(MlflowException):
"""
Exception thrown from tracing logic.
Tracing logic should not block the main execution flow in general, hence this exception
is used to distinguish tracing-related errors and handle them properly.
"""
def __init__(self, message, error_code=INTERNAL_ERROR):
super().__init__(message, error_code=error_code)
|
MlflowTracingException
|
python
|
django__django
|
tests/admin_changelist/admin.py
|
{
"start": 4046,
"end": 4235
}
|
class ____(admin.ModelAdmin):
list_display = ["name", "file", "url"]
list_display_links = ["file", "url"]
site.register(Genre, ListDisplayLinksGenreAdmin)
|
ListDisplayLinksGenreAdmin
|
python
|
huggingface__transformers
|
src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py
|
{
"start": 48854,
"end": 51792
}
|
class ____(XLMRobertaXLPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = XLMRobertaXLModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
end_positions: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, QuestionAnsweringModelOutput]:
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
return_dict=True,
**kwargs,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = [
"XLMRobertaXLForCausalLM",
"XLMRobertaXLForMaskedLM",
"XLMRobertaXLForMultipleChoice",
"XLMRobertaXLForQuestionAnswering",
"XLMRobertaXLForSequenceClassification",
"XLMRobertaXLForTokenClassification",
"XLMRobertaXLModel",
"XLMRobertaXLPreTrainedModel",
]
|
XLMRobertaXLForQuestionAnswering
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/tests/cover/test_type_lookup.py
|
{
"start": 12048,
"end": 12189
}
|
class ____(abc.ABC):
def __init__(self, x): # noqa: B027
pass
@abc.abstractmethod
def qux(self):
pass
|
AbstractFoo
|
python
|
pytorch__pytorch
|
torch/_inductor/codegen/common.py
|
{
"start": 64129,
"end": 65643
}
|
class ____:
"""A CSEVariable is just a name for an expression but it is useful to be able to annotate them on a backend dependent basis.
To do so, the backends can simply overload `Kernel.create_cse_var`
The "CSEVariable.update_on_args" method gives you a hook for annotations
See example of TritonCSEVariable in triton.py
"""
def __init__(
self,
name: str,
bounds: ValueRanges[Any],
dtype: Optional[torch.dtype] = None,
shape: BlockShapeType = None,
):
super().__init__()
assert isinstance(bounds, ValueRanges), type(bounds)
self.name = name
self.bounds = bounds
self.use_count = 1 # track how many times this expression is used
self.dtype = dtype
self.shape = shape
def __str__(self) -> str:
return self.name
def __hash__(self) -> int:
return hash(self.name)
def __eq__(self, other: object) -> bool:
return isinstance(other, CSEVariable) and other.name == self.name
def update_on_args(self, name: str, args: Any, kwargs: Any) -> None:
pass
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.name!r})"
AugmentedKeyT = TypeVar("AugmentedKeyT", default=str)
CSEVariableType = TypeVar("CSEVariableType", bound=CSEVariable, default=CSEVariable)
if TYPE_CHECKING:
ReductionCacheKey = tuple[
torch.dtype,
ReductionType,
Union[CSEVariable, tuple[CSEVariable, ...]],
]
|
CSEVariable
|
python
|
huggingface__transformers
|
src/transformers/models/doge/modular_doge.py
|
{
"start": 1985,
"end": 11487
}
|
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`DogeModel`]. It is used to instantiate an Doge
model according to the specified arguments, defining the model architecture like [SmallDoge/Doge-320M](https://huggingface.co/SmallDoge/Doge-320M).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32768):
Vocabulary size of the Doge2 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`DogeModel`]
hidden_size (`int`, *optional*, defaults to 1024):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 2048):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
hidden_dropout (`float`, *optional*, defaults to 0.0):
Dropout probability for each sequence transformation and state transformation module.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention.
If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used.
When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group.
For more details checkout [this paper](https://huggingface.co/papers/2305.13245).
If it is not specified, will default to `num_attention_heads`.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
sliding_window (`int`, *optional*):
Sliding window attention window size. If not specified, will default to `None`.
keep_window_size (`int`, *optional*, defaults to 2048):
The window size of tokens that are not dynamically masked, and dynamic masking is only performed when the sequence length exceeds this value.
is_moe (`bool`, *optional*, defaults to `False`):
Whether to use the Cross Domain Mixture of Experts, if `True`, the MoE will inherit the MLP to initialize.
num_experts (`int`, *optional*, defaults to 16384):
Number of routed experts in the model. This is only used when `is_moe=True`.
num_experts_per_tok (`int`, *optional*, defaults to 64):
Number of selected experts to route per-token.
norm_topk_prob (`bool`, *optional*, defaults to `False`):
Whether to normalize the topk probabilities.
output_router_logits (`bool`, *optional*, defaults to `False`):
Whether or not the router logits should be returned by the model. Enabling this will also
allow the model to output the auxiliary loss, including load balancing loss and router z-loss.
router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
The aux loss factor for the total loss.
```python
>>> from transformers import DogeConfig, DogeModel
>>> # Initializing a Doge-320M style configuration
>>> configuration = DogeConfig()
>>> # Initializing a model from the Doge-320M style configuration
>>> model = DogeModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "doge"
keys_to_ignore_at_inference = ["past_key_values"]
# Default tensor parallel plan for base model `DogeModel`
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.dt_proj": "rowwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.input_layernorm.weight": "sequence_parallel",
"layers.*.input_residual": "sequence_parallel",
"layers.*.post_attention_layernorm.weight": "sequence_parallel",
"layers.*.post_attention_residual": "sequence_parallel",
"norm.weight": "sequence_parallel",
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
"layers.*.mlp.router_gate": "colwise_rep",
"layers.*.mlp.down_embed": "rowwise_rep",
"layers.*.mlp.up_embed": "rowwise_rep",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 32768,
hidden_size: Optional[int] = 1024,
intermediate_size: Optional[int] = 2048,
num_hidden_layers: Optional[int] = 32,
hidden_dropout: Optional[float] = 0.0,
hidden_act: Optional[str] = "silu",
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[float] = 1e-06,
use_cache: Optional[bool] = True,
tie_word_embeddings: Optional[bool] = False,
max_position_embeddings: Optional[int] = 2048,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
num_attention_heads: Optional[int] = 8,
num_key_value_heads: Optional[int] = None,
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
mlp_bias: Optional[bool] = False,
sliding_window: Optional[int] = None,
keep_window_size: Optional[int] = 2048,
is_moe: Optional[bool] = False,
num_experts: Optional[int] = 16384,
num_experts_per_tok: Optional[int] = 64,
norm_topk_prob: Optional[bool] = False,
output_router_logits: Optional[bool] = False,
router_aux_loss_coef: Optional[float] = 0.001,
**kwargs,
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.hidden_dropout = hidden_dropout
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.max_position_embeddings = max_position_embeddings
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.mlp_bias = mlp_bias
self.sliding_window = sliding_window
self.keep_window_size = keep_window_size
self.is_moe = is_moe
self.num_experts = num_experts
self.num_experts_per_tok = num_experts_per_tok
self.norm_topk_prob = norm_topk_prob
self.output_router_logits = output_router_logits
self.router_aux_loss_coef = router_aux_loss_coef
self.rope_parameters = rope_parameters
# for backward compatibility
if num_key_value_heads is None:
self.num_key_value_heads = num_attention_heads
super().__init__(
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
|
DogeConfig
|
python
|
django-guardian__django-guardian
|
guardian/testapp/models.py
|
{
"start": 3002,
"end": 3243
}
|
class ____(models.Model):
"""
Model for testing whether get_objects_for_user will work when the objects to
be returned have varchar primary keys.
"""
char_pk = models.CharField(primary_key=True, max_length=128)
|
CharPKModel
|
python
|
ZoranPandovski__al-go-rithms
|
data_structures/Graphs/graph/Python/chromatic_number.py
|
{
"start": 87,
"end": 3225
}
|
class ____:
"""
Implementation of a graph with Adjacency Matrix
"""
def __init__(self,size,directed = False):
self.size = size
self.matrix = [[0 for i in range(self.size)] for j in range(self.size)]
self.directed = directed
self.Time = 0
self.color = {i:"white" for i in range(self.size)}
self.parent = [-1 for i in range(self.size)]
self.time = [[0,0] for i in range(self.size)]
def has_vertex(self,i):
return i>=0 and i<self.size
def addEdge(self,i,j,weight=1):
if self.has_vertex(i) and self.has_vertex(j):
if self.directed:
self.matrix[i][j] = weight
else:
self.matrix[i][j] = weight
self.matrix[j][i] = weight
def delEdge(self,i,j):
if self.directed:
self.matrix[i][j] = 0
else:
self.matrix[i][j] = 0
self.matrix[j][i] = 0
def adjacent_vertices(self,i):
if self.has_vertex(i):
w = []
for j in range(self.size):
if self.matrix[i][j] != 0:
w.append(j)
return w
def indeg(self):
indegree = [0 for i in range(self.size)]
for i in range(self.size):
l = self.adjacent_vertices(i)
for j in l:
indegree[j] = indegree[j] + 1
return indegree
    ## Graph Coloring Using Greedy Algorithm
    ## 1. Color the first vertex with the first color.
    ## 2. For each of the remaining V-1 vertices:
    ##    a) Color the currently picked vertex with the lowest-numbered color
    ##       that has not been used on any previously colored vertex adjacent
    ##       to it. If all previously used colors appear on adjacent vertices,
    ##       assign a new color to it.
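    # Note: this greedy heuristic yields an upper bound on the chromatic number
    # and runs in O(V^2) time with the adjacency-matrix representation.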
def chromatic_number(self):
# Initially all vertices are uncolored
color = [-1 for i in range(self.size)]
# Initially all colors are available
available_color = [True for i in range(self.size)]
# Color the 0th vertex
color[0] = 0
# Iterate for all other vertices
        for u in range(1, self.size):
for v in self.adjacent_vertices(u):
# If the neighbor is colored then make its
# color unavailable
if color[v] != -1:
available_color[color[v]] = False
# Find the first available color for vertices u
c = 0
for i in range(self.size):
if available_color[i]:
c = i
break
# Color the vertex with that color
color[u] = c
# Make all the colors available
available_color = [True for i in range(self.size)]
for i in range(self.size):
print("Color of vertex",i,"is",color[i])
def main():
    g = Graph(6, directed=True)
    g.addEdge(5, 2)
    g.addEdge(5, 0)
    g.addEdge(4, 0)
    g.addEdge(4, 1)
    g.addEdge(2, 3)
    g.addEdge(3, 1)
    g.chromatic_number()
if __name__ == '__main__':
    main()
|
Graph
|
python
|
python-pillow__Pillow
|
setup.py
|
{
"start": 3154,
"end": 8117
}
|
class ____(Exception):
pass
PLATFORM_MINGW = os.name == "nt" and "GCC" in sys.version
def _dbg(s: str, tp: str | tuple[str, ...] | None = None) -> None:
if DEBUG:
if tp:
print(s % tp)
return
print(s)
def _find_library_dirs_ldconfig() -> list[str]:
# Based on ctypes.util from Python 2
ldconfig = "ldconfig" if shutil.which("ldconfig") else "/sbin/ldconfig"
args: list[str]
env: dict[str, str]
expr: str
if sys.platform.startswith(("linux", "gnu")):
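        # The size of a C long distinguishes 32-bit from 64-bit runtimes.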
if struct.calcsize("l") == 4:
machine = os.uname()[4] + "-32"
else:
machine = os.uname()[4] + "-64"
mach_map = {
"x86_64-64": "libc6,x86-64",
"ppc64-64": "libc6,64bit",
"sparc64-64": "libc6,64bit",
"s390x-64": "libc6,64bit",
"ia64-64": "libc6,IA-64",
}
abi_type = mach_map.get(machine, "libc6")
# Assuming GLIBC's ldconfig (with option -p)
# Alpine Linux uses musl that can't print cache
args = [ldconfig, "-p"]
expr = rf".*\({abi_type}.*\) => (.*)"
env = dict(os.environ)
env["LC_ALL"] = "C"
env["LANG"] = "C"
elif sys.platform.startswith("freebsd"):
args = [ldconfig, "-r"]
expr = r".* => (.*)"
env = {}
try:
p = subprocess.Popen(
args, stderr=subprocess.DEVNULL, stdout=subprocess.PIPE, env=env, text=True
)
except OSError: # E.g. command not found
return []
data = p.communicate()[0]
dirs = []
for dll in re.findall(expr, data):
dir = os.path.dirname(dll)
if dir not in dirs:
dirs.append(dir)
return dirs
def _add_directory(
path: list[str], subdir: str | None, where: int | None = None
) -> None:
if subdir is None:
return
subdir = os.path.realpath(subdir)
if os.path.isdir(subdir) and subdir not in path:
if where is None:
_dbg("Appending path %s", subdir)
path.append(subdir)
else:
_dbg("Inserting path %s", subdir)
path.insert(where, subdir)
elif subdir in path and where is not None:
path.remove(subdir)
path.insert(where, subdir)
def _find_include_file(self: pil_build_ext, include: str) -> str | None:
for directory in self.compiler.include_dirs:
_dbg("Checking for include file %s in %s", (include, directory))
path = os.path.join(directory, include)
if os.path.isfile(path):
_dbg("Found %s", include)
return path
return None
def _find_library_file(self: pil_build_ext, library: str) -> str | None:
ret = self.compiler.find_library_file(self.compiler.library_dirs, library)
if ret:
_dbg("Found library %s at %s", (library, ret))
else:
_dbg("Couldn't find library %s in %s", (library, self.compiler.library_dirs))
return ret
def _find_include_dir(self: pil_build_ext, dirname: str, include: str) -> bool | str:
for directory in self.compiler.include_dirs:
_dbg("Checking for include file %s in %s", (include, directory))
if os.path.isfile(os.path.join(directory, include)):
_dbg("Found %s in %s", (include, directory))
return True
subdir = os.path.join(directory, dirname)
_dbg("Checking for include file %s in %s", (include, subdir))
if os.path.isfile(os.path.join(subdir, include)):
_dbg("Found %s in %s", (include, subdir))
return subdir
return False
def _cmd_exists(cmd: str) -> bool:
if "PATH" not in os.environ:
return False
return any(
os.access(os.path.join(path, cmd), os.X_OK)
for path in os.environ["PATH"].split(os.pathsep)
)
def _pkg_config(name: str) -> tuple[list[str], list[str]] | None:
command = os.environ.get("PKG_CONFIG", "pkg-config")
for keep_system in (True, False):
try:
command_libs = [command, "--libs-only-L", name]
command_cflags = [command, "--cflags-only-I", name]
stderr = None
if keep_system:
command_libs.append("--keep-system-libs")
command_cflags.append("--keep-system-cflags")
stderr = subprocess.DEVNULL
if not DEBUG:
command_libs.append("--silence-errors")
command_cflags.append("--silence-errors")
libs = re.split(
r"(^|\s+)-L",
subprocess.check_output(command_libs, stderr=stderr)
.decode("utf8")
.strip(),
)[::2][1:]
cflags = re.split(
r"(^|\s+)-I",
subprocess.check_output(command_cflags).decode("utf8").strip(),
)[::2][1:]
return libs, cflags
except Exception:
pass
return None
|
RequiredDependencyException
|
python
|
scipy__scipy
|
scipy/differentiate/tests/test_differentiate.py
|
{
"start": 25257,
"end": 27724
}
|
class ____(JacobianHessianTest):
jh_func = hessian
@pytest.mark.parametrize('shape', [(), (4,), (2, 4)])
def test_example(self, shape, xp):
rng = np.random.default_rng(458912319542)
m = 3
x = xp.asarray(rng.random((m,) + shape), dtype=xp.float64)
res = hessian(optimize.rosen, x)
if shape:
x = xp.reshape(x, (m, -1))
ref = xp.stack([optimize.rosen_hess(xi) for xi in x.T])
ref = xp.moveaxis(ref, 0, -1)
ref = xp.reshape(ref, (m, m,) + shape)
else:
ref = optimize.rosen_hess(x)
xp_assert_close(res.ddf, ref, atol=1e-8)
# # Removed symmetry enforcement; consider adding back in as a feature
# # check symmetry
# for key in ['ddf', 'error', 'nfev', 'success', 'status']:
# assert_equal(res[key], np.swapaxes(res[key], 0, 1))
def test_float32(self, xp):
rng = np.random.default_rng(458912319542)
x = xp.asarray(rng.random(3), dtype=xp.float32)
res = hessian(optimize.rosen, x)
ref = optimize.rosen_hess(x)
mask = (ref != 0)
xp_assert_close(res.ddf[mask], ref[mask])
atol = 1e-2 * xp.abs(xp.min(ref[mask]))
xp_assert_close(res.ddf[~mask], ref[~mask], atol=atol)
def test_nfev(self, xp):
z = xp.asarray([0.5, 0.25])
def f1(z):
x, y = xp.broadcast_arrays(*z)
f1.nfev = f1.nfev + (math.prod(x.shape[2:]) if x.ndim > 2 else 1)
return xp.sin(x) * y ** 3
f1.nfev = 0
res = hessian(f1, z, initial_step=10)
f1.nfev = 0
res00 = hessian(lambda x: f1([x[0], z[1]]), z[0:1], initial_step=10)
assert res.nfev[0, 0] == f1.nfev == res00.nfev[0, 0]
f1.nfev = 0
res11 = hessian(lambda y: f1([z[0], y[0]]), z[1:2], initial_step=10)
assert res.nfev[1, 1] == f1.nfev == res11.nfev[0, 0]
# Removed symmetry enforcement; consider adding back in as a feature
# assert_equal(res.nfev, res.nfev.T) # check symmetry
# assert np.unique(res.nfev).size == 3
@pytest.mark.skip_xp_backends(np_only=True,
reason='Python list input uses NumPy backend')
def test_small_rtol_warning(self, xp):
message = 'The specified `rtol=1e-15`, but...'
with pytest.warns(RuntimeWarning, match=message):
hessian(xp.sin, [1.], tolerances=dict(rtol=1e-15))
|
TestHessian
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/collective_ops_test.py
|
{
"start": 1726,
"end": 2388
}
|
class ____(object):
@staticmethod
def all_reduce(t, group_size, group_key, instance_key, *args, **kwargs):
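    # Drop 'ordering_token': this v1 wrapper does not accept it (it appears to be v2-only).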
kwargs.pop('ordering_token', None)
return _collective_ops.all_reduce(t, group_size, group_key, instance_key,
*args, **kwargs)
@staticmethod
def all_gather(t, group_size, group_key, instance_key, *args, **kwargs):
kwargs.pop('ordering_token', None)
return _collective_ops.all_gather(t, group_size, group_key, instance_key,
*args, **kwargs)
broadcast_send = _collective_ops.broadcast_send
broadcast_recv = _collective_ops.broadcast_recv
|
CollectiveOpsV1
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_20/tasks.py
|
{
"start": 339891,
"end": 341026
}
|
class ____(Response):
"""
Response of tasks.make_private endpoint.
:param updated: Number of tasks updated
:type updated: int
"""
_service = "tasks"
_action = "make_private"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"updated": {
"description": "Number of tasks updated",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None:
super(MakePrivateResponse, self).__init__(**kwargs)
self.updated = updated
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
|
MakePrivateResponse
|
python
|
spack__spack
|
lib/spack/spack/vendor/ruamel/yaml/error.py
|
{
"start": 6625,
"end": 7135
}
|
class ____(YAMLWarning):
text = """
The default 'Loader' for 'load(stream)' without further arguments can be unsafe.
Use 'load(stream, Loader=spack.vendor.ruamel.yaml.Loader)' explicitly if that is OK.
Alternatively include the following in your code:
import warnings
warnings.simplefilter('ignore', spack.vendor.ruamel.yaml.error.UnsafeLoaderWarning)
In most other cases you should consider using 'safe_load(stream)'"""
pass
warnings.simplefilter('once', UnsafeLoaderWarning)
|
UnsafeLoaderWarning
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/auth.py
|
{
"start": 21271,
"end": 22785
}
|
class ____(Response):
"""
Response of auth.revoke_credentials endpoint.
:param revoked: Number of credentials revoked
:type revoked: int
"""
_service = "auth"
_action = "revoke_credentials"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"revoked": {
"description": "Number of credentials revoked",
"enum": [0, 1],
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, revoked: Optional[int] = None, **kwargs: Any) -> None:
super(RevokeCredentialsResponse, self).__init__(**kwargs)
self.revoked = revoked
@schema_property("revoked")
def revoked(self) -> Optional[int]:
return self._property_revoked
@revoked.setter
def revoked(self, value: Optional[int]) -> None:
if value is None:
self._property_revoked = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "revoked", six.integer_types)
self._property_revoked = value
response_mapping = {
LoginRequest: LoginResponse,
CreateCredentialsRequest: CreateCredentialsResponse,
GetCredentialsRequest: GetCredentialsResponse,
EditCredentialsRequest: EditCredentialsResponse,
RevokeCredentialsRequest: RevokeCredentialsResponse,
EditUserRequest: EditUserResponse,
}
|
RevokeCredentialsResponse
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI012.py
|
{
"start": 1041,
"end": 1144
}
|
class ____:
value: int = 0
def __init__():
pass
def function():
pass
pass
|
WithInit
|
python
|
kamyu104__LeetCode-Solutions
|
Python/minimum-cost-to-make-arrays-identical.py
|
{
"start": 67,
"end": 464
}
|
class ____(object):
def minCost(self, arr, brr, k):
"""
:type arr: List[int]
:type brr: List[int]
:type k: int
:rtype: int
"""
def cost():
return sum(abs(x-y) for x, y in itertools.izip(arr, brr))
result = cost()
arr.sort()
brr.sort()
result = min(result, k+cost())
return result
|
Solution
|
python
|
facebookresearch__faiss
|
benchs/bench_fw/utils.py
|
{
"start": 3389,
"end": 3741
}
|
class ____:
def __init__(self, values):
self.values = values
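    # Component-wise (Pareto) comparison: a cost is <= another only if every
    # component is <=, so this defines a partial order over cost vectors.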
def __le__(self, other):
return all(
v1 <= v2 for v1, v2 in zip(self.values, other.values, strict=True)
)
def __lt__(self, other):
return all(
v1 < v2 for v1, v2 in zip(self.values, other.values, strict=True)
)
|
Cost
|
python
|
PyCQA__pydocstyle
|
src/tests/test_cases/sections.py
|
{
"start": 12420,
"end": 12885
}
|
class ____: # noqa: D203
"""Test class."""
@expect("D417: Missing argument descriptions in the docstring "
"(argument(s) y are missing descriptions in "
"'test_incorrect_indent' docstring)", arg_count=3)
def test_incorrect_indent(self, x=1, y=2): # noqa: D207, D213, D407
"""Reproducing issue #437.
Testing this incorrectly indented docstring.
Args:
x: Test argument.
"""
|
TestIncorrectIndent
|
python
|
more-itertools__more-itertools
|
tests/test_more.py
|
{
"start": 170390,
"end": 172176
}
|
class ____(TestCase):
def test_r_less_than_n(self):
iterable = 'abcdefg'
r = 4
first_index = {}
for index, element in enumerate(combinations(iterable, r)):
actual = mi.combination_index(element, iterable)
expected = first_index.setdefault(element, index)
self.assertEqual(actual, expected)
def test_r_equal_to_n(self):
iterable = 'abcd'
r = len(iterable)
first_index = {}
for index, element in enumerate(combinations(iterable, r=r)):
actual = mi.combination_index(element, iterable)
expected = first_index.setdefault(element, index)
self.assertEqual(actual, expected)
def test_multiplicity(self):
iterable = 'abacba'
r = 3
first_index = {}
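        # 'abacba' has repeated letters, so different index positions can produce
        # equal combinations; combination_index should return the first index.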
for index, element in enumerate(combinations(iterable, r)):
actual = mi.combination_index(element, iterable)
expected = first_index.setdefault(element, index)
self.assertEqual(actual, expected)
def test_null(self):
actual = mi.combination_index(tuple(), [])
expected = 0
self.assertEqual(actual, expected)
def test_long(self):
actual = mi.combination_index((2, 12, 35, 126), range(180))
expected = 2000000
self.assertEqual(actual, expected)
def test_invalid_order(self):
with self.assertRaises(ValueError):
mi.combination_index(tuple('acb'), 'abcde')
def test_invalid_large(self):
with self.assertRaises(ValueError):
mi.combination_index(tuple('abcdefg'), 'abcdef')
def test_invalid_match(self):
with self.assertRaises(ValueError):
mi.combination_index(tuple('axe'), 'abcde')
|
CombinationIndexTests
|
python
|
sympy__sympy
|
sympy/matrices/expressions/special.py
|
{
"start": 5175,
"end": 8021
}
|
class ____(MatrixExpr):
"""
Matrix whose all entries are ones.
Also called "matrix of ones" or "all-ones matrix".
https://en.wikipedia.org/wiki/Matrix_of_ones
Examples
========
>>> from sympy.matrices.expressions import OneMatrix
>>> O = OneMatrix(3, 4)
>>> O.shape
(3, 4)
>>> O.as_explicit()
Matrix([
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]])
"""
def __new__(cls, m, n, evaluate=False):
m, n = _sympify(m), _sympify(n)
cls._check_dim(m)
cls._check_dim(n)
if evaluate:
condition = Eq(m, 1) & Eq(n, 1)
if condition == True:
return Identity(1)
obj = super().__new__(cls, m, n)
return obj
@property
def shape(self):
return self._args
@property
def is_Identity(self):
return self._is_1x1() == True
def as_explicit(self):
from sympy.matrices.immutable import ImmutableDenseMatrix
return ImmutableDenseMatrix.ones(*self.shape)
def doit(self, **hints):
args = self.args
if hints.get('deep', True):
args = [a.doit(**hints) for a in args]
return self.func(*args, evaluate=True)
def _eval_power(self, exp):
# exp = -1, 0, 1 are already handled at this stage
if self._is_1x1() == True:
return Identity(1)
if (exp < 0) == True:
raise NonInvertibleMatrixError("Matrix det == 0; not invertible")
if ask(Q.integer(exp)):
return self.shape[0] ** (exp - 1) * OneMatrix(*self.shape)
return super()._eval_power(exp)
def _eval_transpose(self):
return OneMatrix(self.cols, self.rows)
def _eval_adjoint(self):
return OneMatrix(self.cols, self.rows)
def _eval_trace(self):
return S.One*self.rows
def _is_1x1(self):
"""Returns true if the matrix is known to be 1x1"""
shape = self.shape
return Eq(shape[0], 1) & Eq(shape[1], 1)
def _eval_determinant(self):
condition = self._is_1x1()
if condition == True:
return S.One
elif condition == False:
return S.Zero
else:
from sympy.matrices.expressions.determinant import Determinant
return Determinant(self)
def _eval_inverse(self):
condition = self._is_1x1()
if condition == True:
return Identity(1)
elif condition == False:
raise NonInvertibleMatrixError("Matrix det == 0; not invertible.")
else:
from .inverse import Inverse
return Inverse(self)
def _eval_as_real_imag(self):
return self, ZeroMatrix(*self.shape)
def _eval_conjugate(self):
return self
def _entry(self, i, j, **kwargs):
return S.One
|
OneMatrix
|
python
|
huggingface__transformers
|
src/transformers/models/oneformer/modeling_oneformer.py
|
{
"start": 62405,
"end": 70960
}
|
class ____(nn.Module):
def __init__(self, config: OneFormerConfig, feature_channels):
super().__init__()
self.config = config
# positional encoding
self.position_embedding = OneFormerSinePositionEmbedding(num_pos_feats=config.conv_dim // 2, normalize=True)
self.num_feature_levels = 3
transformer_in_channels = feature_channels[-self.num_feature_levels :]
self.transformer_feature_strides = config.strides[-self.num_feature_levels :]
self.feature_channels = feature_channels
self.level_embed = nn.Parameter(torch.Tensor(self.num_feature_levels, config.conv_dim))
# Create input projection layers
if self.num_feature_levels > 1:
input_projections_list = []
for in_channels in transformer_in_channels[::-1]:
input_projections_list.append(
nn.Sequential(
nn.Conv2d(in_channels, config.conv_dim, kernel_size=1),
nn.GroupNorm(32, config.conv_dim),
)
)
self.input_projections = nn.ModuleList(input_projections_list)
else:
self.input_projections = nn.ModuleList(
[
nn.Sequential(
nn.Conv2d(transformer_in_channels[-1], config.conv_dim, kernel_size=1),
nn.GroupNorm(32, config.conv_dim),
)
]
)
self.encoder = OneFormerPixelDecoderEncoderOnly(config)
self.mask_projection = nn.Conv2d(
config.conv_dim,
config.mask_dim,
kernel_size=1,
stride=1,
padding=0,
)
self.common_stride = config.common_stride
# extra fpn levels
stride = min(self.transformer_feature_strides)
self.num_fpn_levels = int(np.log2(stride) - np.log2(self.common_stride))
lateral_convs = []
output_convs = []
for idx, in_channels in enumerate(self.feature_channels[: self.num_fpn_levels]):
lateral_conv = nn.Sequential(
nn.Conv2d(
in_channels,
config.conv_dim,
kernel_size=1,
bias=False,
),
nn.GroupNorm(32, config.conv_dim),
)
output_conv = nn.Sequential(
nn.Conv2d(
config.conv_dim,
config.conv_dim,
kernel_size=3,
stride=1,
padding=1,
bias=False,
),
nn.GroupNorm(32, config.conv_dim),
nn.ReLU(),
)
self.add_module(f"adapter_{idx + 1}", lateral_conv)
self.add_module(f"layer_{idx + 1}", output_conv)
lateral_convs.append(lateral_conv)
output_convs.append(output_conv)
# Place convs into top-down order (from low to high resolution)
# to make the top-down computation in forward clearer.
self.lateral_convs = lateral_convs[::-1]
self.output_convs = output_convs[::-1]
def get_valid_ratio(self, mask, dtype=torch.float32):
"""Get the valid ratio of all feature maps."""
_, height, width = mask.shape
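        # True in the mask marks padded positions, so ~mask counts valid rows/columns.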
valid_height = torch.sum(~mask[:, :, 0], 1)
valid_width = torch.sum(~mask[:, 0, :], 1)
valid_ratio_height = valid_height.to(dtype) / height
valid_ratio_width = valid_width.to(dtype) / width
valid_ratio = torch.stack([valid_ratio_width, valid_ratio_height], -1)
return valid_ratio
def forward(
self,
features,
encoder_outputs=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
# Then, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default)
sources = []
position_embeddings_list = []
for level, source in enumerate(features[::-1][: self.num_feature_levels]):
sources.append(self.input_projections[level](source))
position_embeddings_list.append(self.position_embedding(source.shape, source.device, source.dtype))
masks = [torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool) for x in sources]
# Prepare encoder inputs (by flattening)
source_flatten = []
mask_flatten = []
lvl_pos_embed_flatten = []
spatial_shapes = []
for level, (source, mask, pos_embed) in enumerate(zip(sources, masks, position_embeddings_list)):
batch_size, num_channels, height, width = source.shape
spatial_shape = (height, width)
spatial_shapes.append(spatial_shape)
source = source.flatten(2).transpose(1, 2)
mask = mask.flatten(1)
pos_embed = pos_embed.flatten(2).transpose(1, 2)
lvl_pos_embed = pos_embed + self.level_embed[level].view(1, 1, -1)
lvl_pos_embed_flatten.append(lvl_pos_embed)
source_flatten.append(source)
mask_flatten.append(mask)
source_flatten = torch.cat(source_flatten, 1)
mask_flatten = torch.cat(mask_flatten, 1)
lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=source_flatten.device)
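        # Start offset of each flattened level: exclusive cumulative sum of H*W per level.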
level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))
valid_ratios = torch.stack([self.get_valid_ratio(m, dtype=source_flatten.dtype) for m in masks], 1)
        # Fourth, send source_flatten + mask_flatten + lvl_pos_embed_flatten (backbone + proj layer output) through the encoder
# Also provide spatial_shapes, level_start_index and valid_ratios
if encoder_outputs is None:
encoder_outputs = self.encoder(
inputs_embeds=source_flatten,
attention_mask=mask_flatten,
position_embeddings=lvl_pos_embed_flatten,
spatial_shapes=spatial_shapes,
level_start_index=level_start_index,
valid_ratios=valid_ratios,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
y = encoder_outputs.last_hidden_state
bs = y.shape[0]
split_size_or_sections = [None] * self.num_feature_levels
for i in range(self.num_feature_levels):
if i < self.num_feature_levels - 1:
split_size_or_sections[i] = level_start_index[i + 1] - level_start_index[i]
else:
split_size_or_sections[i] = y.shape[1] - level_start_index[i]
y = torch.split(y, split_size_or_sections, dim=1)
out = []
multi_scale_features = []
num_cur_levels = 0
for i, z in enumerate(y):
out.append(z.transpose(1, 2).view(bs, -1, spatial_shapes[i][0], spatial_shapes[i][1]))
# append `out` with extra FPN levels
# Reverse feature maps into top-down order (from low to high resolution)
for idx, feats in enumerate(features[: self.num_fpn_levels][::-1]):
lateral_conv = self.lateral_convs[idx]
output_conv = self.output_convs[idx]
cur_fpn = lateral_conv(feats)
            # Following the FPN design, upsample the coarser map (bilinear here) and sum it with the lateral features
y = cur_fpn + nn.functional.interpolate(
out[-1], size=cur_fpn.shape[-2:], mode="bilinear", align_corners=False
)
y = output_conv(y)
out.append(y)
for o in out:
if num_cur_levels < self.num_feature_levels:
multi_scale_features.append(o)
num_cur_levels += 1
return OneFormerPixelDecoderOutput(
mask_features=self.mask_projection(out[-1]),
multi_scale_features=multi_scale_features,
attentions=encoder_outputs.attentions,
)
# Modified from from transformers.models.mask2former.modeling_mask2former.Mask2FormerPixelLevelModule with Mask2->One
|
OneFormerPixelDecoder
|
python
|
huggingface__transformers
|
tests/models/lightglue/test_modeling_lightglue.py
|
{
"start": 11984,
"end": 27196
}
|
class ____(unittest.TestCase):
@cached_property
def default_image_processor(self):
return AutoImageProcessor.from_pretrained("ETH-CVG/lightglue_superpoint") if is_vision_available() else None
@slow
def test_inference(self):
model = LightGlueForKeypointMatching.from_pretrained(
"ETH-CVG/lightglue_superpoint", attn_implementation="eager"
).to(torch_device)
preprocessor = self.default_image_processor
images = prepare_imgs()
inputs = preprocessor(images=images, return_tensors="pt").to(torch_device)
with torch.no_grad():
outputs = model(**inputs, output_hidden_states=True, output_attentions=True)
predicted_number_of_matches0 = torch.sum(outputs.matches[0][0] != -1).item()
predicted_matches_values0 = outputs.matches[0, 0, 10:30]
predicted_matching_scores_values0 = outputs.matching_scores[0, 0, 10:30]
predicted_number_of_matches1 = torch.sum(outputs.matches[1][0] != -1).item()
predicted_matches_values1 = outputs.matches[1, 0, 10:30]
predicted_matching_scores_values1 = outputs.matching_scores[1, 0, 10:30]
expected_number_of_matches0 = 866
expected_matches_values0 = torch.tensor(
[10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
dtype=torch.int64,
device=torch_device,
)
expected_matching_scores_values0 = torch.tensor(
[
0.6188,0.7817,0.5686,0.9353,0.9801,0.9193,0.8632,0.9111,0.9821,0.5496,
0.9906,0.8682,0.9679,0.9914,0.9318,0.1910,0.9669,0.3240,0.9971,0.9923,
],
device=torch_device
) # fmt:skip
expected_number_of_matches1 = 140
expected_matches_values1 = torch.tensor(
[14, -1, -1, 15, 17, 13, -1, -1, -1, -1, -1, -1, 5, -1, -1, 19, -1, 10, -1, 11],
dtype=torch.int64,
device=torch_device,
)
expected_matching_scores_values1 = torch.tensor(
[0.3796, 0, 0, 0.3772, 0.4439, 0.2411, 0, 0, 0.0032, 0, 0, 0, 0.2997, 0, 0, 0.6762, 0, 0.8826, 0, 0.5583],
device=torch_device,
)
# expected_early_stopping_layer = 2
# predicted_early_stopping_layer = torch.max(outputs.prune[1]).item()
# self.assertEqual(predicted_early_stopping_layer, expected_early_stopping_layer)
# self.assertEqual(predicted_number_of_matches, expected_second_number_of_matches)
"""
Because of inconsistencies introduced between CUDA versions, the checks here are less strict. SuperGlue relies
on SuperPoint, which may, depending on CUDA version, return different number of keypoints (866 or 867 in this
specific test example). The consequence of having different number of keypoints is that the number of matches
will also be different. In the 20 first matches being checked, having one keypoint less will result in 1 less
match. The matching scores will also be different, as the keypoints are different. The checks here are less
strict to account for these inconsistencies.
Therefore, the test checks that the predicted number of matches, matches and matching scores are close to the
        expected values, individually. Here, the tolerance on the number of differing values is set to fewer than 4.
This was discussed [here](https://github.com/huggingface/transformers/pull/29886#issuecomment-2482752787)
Such CUDA inconsistencies can be found
[here](https://github.com/huggingface/transformers/pull/33200/files#r1785980300)
"""
self.assertTrue(abs(predicted_number_of_matches0 - expected_number_of_matches0) < 4)
self.assertTrue(abs(predicted_number_of_matches1 - expected_number_of_matches1) < 4)
self.assertTrue(
torch.sum(~torch.isclose(predicted_matching_scores_values0, expected_matching_scores_values0, atol=1e-2))
< 4
)
self.assertTrue(
torch.sum(~torch.isclose(predicted_matching_scores_values1, expected_matching_scores_values1, atol=1e-2))
< 4
)
self.assertTrue(torch.sum(predicted_matches_values0 != expected_matches_values0) < 4)
self.assertTrue(torch.sum(predicted_matches_values1 != expected_matches_values1) < 4)
@slow
def test_inference_without_early_stop(self):
model = LightGlueForKeypointMatching.from_pretrained(
"ETH-CVG/lightglue_superpoint", attn_implementation="eager", depth_confidence=1.0
).to(torch_device)
preprocessor = self.default_image_processor
images = prepare_imgs()
inputs = preprocessor(images=images, return_tensors="pt").to(torch_device)
with torch.no_grad():
outputs = model(**inputs, output_hidden_states=True, output_attentions=True)
predicted_number_of_matches0 = torch.sum(outputs.matches[0][0] != -1).item()
predicted_matches_values0 = outputs.matches[0, 0, 10:30]
predicted_matching_scores_values0 = outputs.matching_scores[0, 0, 10:30]
predicted_number_of_matches1 = torch.sum(outputs.matches[1][0] != -1).item()
predicted_matches_values1 = outputs.matches[1, 0, 10:30]
predicted_matching_scores_values1 = outputs.matching_scores[1, 0, 10:30]
expected_number_of_matches0 = 134
expected_matches_values0 = torch.tensor(
[-1, -1, 17, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 19, -1, 10, -1, 11], dtype=torch.int64
).to(torch_device)
expected_matching_scores_values0 = torch.tensor(
[0.0083, 0, 0.2022, 0.0621, 0, 0.0828, 0, 0, 0.0003, 0, 0, 0, 0.0960, 0, 0, 0.6940, 0, 0.7167, 0, 0.1512]
).to(torch_device)
expected_number_of_matches1 = 862
expected_matches_values1 = torch.tensor(
[10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29], dtype=torch.int64
).to(torch_device)
expected_matching_scores_values1 = torch.tensor(
[
0.4772,
0.3781,
0.0631,
0.9559,
0.8746,
0.9271,
0.4882,
0.5406,
0.9439,
0.1526,
0.5028,
0.4107,
0.5591,
0.9130,
0.7572,
0.0302,
0.4532,
0.0893,
0.9490,
0.4880,
]
).to(torch_device)
# expected_early_stopping_layer = 2
# predicted_early_stopping_layer = torch.max(outputs.prune[1]).item()
# self.assertEqual(predicted_early_stopping_layer, expected_early_stopping_layer)
# self.assertEqual(predicted_number_of_matches, expected_second_number_of_matches)
"""
Because of inconsistencies introduced between CUDA versions, the checks here are less strict. SuperGlue relies
on SuperPoint, which may, depending on CUDA version, return different number of keypoints (866 or 867 in this
specific test example). The consequence of having different number of keypoints is that the number of matches
will also be different. In the 20 first matches being checked, having one keypoint less will result in 1 less
match. The matching scores will also be different, as the keypoints are different. The checks here are less
strict to account for these inconsistencies.
Therefore, the test checks that the predicted number of matches, matches and matching scores are close to the
        expected values, individually. Here, the tolerance on the number of differing values is set to fewer than 4.
This was discussed [here](https://github.com/huggingface/transformers/pull/29886#issuecomment-2482752787)
Such CUDA inconsistencies can be found
[here](https://github.com/huggingface/transformers/pull/33200/files#r1785980300)
"""
self.assertTrue(abs(predicted_number_of_matches0 - expected_number_of_matches0) < 4)
self.assertTrue(abs(predicted_number_of_matches1 - expected_number_of_matches1) < 4)
self.assertTrue(
torch.sum(~torch.isclose(predicted_matching_scores_values0, expected_matching_scores_values0, atol=1e-2))
< 4
)
self.assertTrue(
torch.sum(~torch.isclose(predicted_matching_scores_values1, expected_matching_scores_values1, atol=1e-2))
< 4
)
self.assertTrue(torch.sum(predicted_matches_values0 != expected_matches_values0) < 4)
self.assertTrue(torch.sum(predicted_matches_values1 != expected_matches_values1) < 4)
@slow
def test_inference_without_early_stop_and_keypoint_pruning(self):
model = LightGlueForKeypointMatching.from_pretrained(
"ETH-CVG/lightglue_superpoint",
attn_implementation="eager",
depth_confidence=1.0,
width_confidence=1.0,
).to(torch_device)
preprocessor = self.default_image_processor
images = prepare_imgs()
inputs = preprocessor(images=images, return_tensors="pt").to(torch_device)
with torch.no_grad():
outputs = model(**inputs, output_hidden_states=True, output_attentions=True)
predicted_number_of_matches0 = torch.sum(outputs.matches[0][0] != -1).item()
predicted_matches_values0 = outputs.matches[0, 0, 10:30]
predicted_matching_scores_values0 = outputs.matching_scores[0, 0, 10:30]
predicted_number_of_matches1 = torch.sum(outputs.matches[1][0] != -1).item()
predicted_matches_values1 = outputs.matches[1, 0, 10:30]
predicted_matching_scores_values1 = outputs.matching_scores[1, 0, 10:30]
expected_number_of_matches0 = 144
expected_matches_values0 = torch.tensor(
[-1, -1, 17, -1, -1, 13, -1, -1, -1, -1, -1, -1, 5, -1, -1, 19, -1, 10, -1, 11], dtype=torch.int64
).to(torch_device)
expected_matching_scores_values0 = torch.tensor(
[
0.0699,
0.0302,
0.3356,
0.0820,
0,
0.2266,
0,
0,
0.0241,
0,
0,
0,
0.1674,
0,
0,
0.8114,
0,
0.8120,
0,
0.2936,
]
).to(torch_device)
expected_number_of_matches1 = 862
expected_matches_values1 = torch.tensor(
[10, 11, -1, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, -1, 26, -1, 28, 29], dtype=torch.int64
).to(torch_device)
expected_matching_scores_values1 = torch.tensor(
[
0.4772,
0.3781,
0.0631,
0.9559,
0.8746,
0.9271,
0.4882,
0.5406,
0.9439,
0.1526,
0.5028,
0.4107,
0.5591,
0.9130,
0.7572,
0.0302,
0.4532,
0.0893,
0.9490,
0.4880,
]
).to(torch_device)
# expected_early_stopping_layer = 2
# predicted_early_stopping_layer = torch.max(outputs.prune[1]).item()
# self.assertEqual(predicted_early_stopping_layer, expected_early_stopping_layer)
# self.assertEqual(predicted_number_of_matches, expected_second_number_of_matches)
"""
Because of inconsistencies introduced between CUDA versions, the checks here are less strict. SuperGlue relies
on SuperPoint, which may, depending on CUDA version, return different number of keypoints (866 or 867 in this
specific test example). The consequence of having different number of keypoints is that the number of matches
will also be different. In the 20 first matches being checked, having one keypoint less will result in 1 less
match. The matching scores will also be different, as the keypoints are different. The checks here are less
strict to account for these inconsistencies.
Therefore, the test checks that the predicted number of matches, matches and matching scores are close to the
        expected values, individually. Here, the tolerance on the number of differing values is set to fewer than 4.
This was discussed [here](https://github.com/huggingface/transformers/pull/29886#issuecomment-2482752787)
Such CUDA inconsistencies can be found
[here](https://github.com/huggingface/transformers/pull/33200/files#r1785980300)
"""
self.assertTrue(abs(predicted_number_of_matches0 - expected_number_of_matches0) < 4)
self.assertTrue(abs(predicted_number_of_matches1 - expected_number_of_matches1) < 4)
self.assertTrue(
torch.sum(~torch.isclose(predicted_matching_scores_values0, expected_matching_scores_values0, atol=1e-2))
< 4
)
self.assertTrue(
torch.sum(~torch.isclose(predicted_matching_scores_values1, expected_matching_scores_values1, atol=1e-2))
< 4
)
self.assertTrue(torch.sum(predicted_matches_values0 != expected_matches_values0) < 4)
self.assertTrue(torch.sum(predicted_matches_values1 != expected_matches_values1) < 4)
@slow
def test_inference_order_with_early_stop(self):
model = LightGlueForKeypointMatching.from_pretrained(
"ETH-CVG/lightglue_superpoint", attn_implementation="eager"
).to(torch_device)
preprocessor = self.default_image_processor
images = prepare_imgs()
# [[image2, image0], [image1, image1]] -> [[image2, image0], [image2, image0], [image1, image1]]
images = [images[0]] + images # adding a 3rd pair to test batching with early stopping
inputs = preprocessor(images=images, return_tensors="pt").to(torch_device)
with torch.no_grad():
outputs = model(**inputs, output_hidden_states=True, output_attentions=True)
predicted_number_of_matches_pair0 = torch.sum(outputs.matches[0][0] != -1).item()
predicted_number_of_matches_pair1 = torch.sum(outputs.matches[1][0] != -1).item()
predicted_number_of_matches_pair2 = torch.sum(outputs.matches[2][0] != -1).item()
# pair 0 and 1 are the same, so should have the same number of matches
# pair 2 is [image1, image1] so should have more matches than first two pairs
# This ensures that early stopping does not affect the order of the outputs
# See : https://huggingface.co/ETH-CVG/lightglue_superpoint/discussions/6
# The bug made the pairs switch order when early stopping was activated
self.assertTrue(predicted_number_of_matches_pair0 == predicted_number_of_matches_pair1)
self.assertTrue(predicted_number_of_matches_pair0 < predicted_number_of_matches_pair2)
|
LightGlueModelIntegrationTest
|
python
|
langchain-ai__langchain
|
libs/core/langchain_core/messages/function.py
|
{
"start": 874,
"end": 2094
}
|
class ____(FunctionMessage, BaseMessageChunk):
"""Function Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["FunctionMessageChunk"] = "FunctionMessageChunk" # type: ignore[assignment]
"""The type of the message (used for serialization)."""
@override
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore[override]
if isinstance(other, FunctionMessageChunk):
if self.name != other.name:
msg = "Cannot concatenate FunctionMessageChunks with different names."
raise ValueError(msg)
return self.__class__(
name=self.name,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
id=self.id,
)
return super().__add__(other)
|
FunctionMessageChunk
|
python
|
huggingface__transformers
|
src/transformers/models/idefics/processing_idefics.py
|
{
"start": 4449,
"end": 17959
}
|
class ____(ProcessorMixin):
r"""
Constructs a IDEFICS processor which wraps a LLama tokenizer and IDEFICS image processor into a single processor.
[`IdeficsProcessor`] offers all the functionalities of [`IdeficsImageProcessor`] and [`LlamaTokenizerFast`]. See
the docstring of [`~IdeficsProcessor.__call__`] and [`~IdeficsProcessor.decode`] for more information.
Args:
image_processor (`IdeficsImageProcessor`):
An instance of [`IdeficsImageProcessor`]. The image processor is a required input.
tokenizer (`LlamaTokenizerFast`):
An instance of [`LlamaTokenizerFast`]. The tokenizer is a required input.
image_size (`int`, *optional*, defaults to 224):
Image size (assuming a square image)
add_end_of_utterance_token (`str`, *optional*):
The string representation of token representing end of utterance
"""
def __init__(self, image_processor, tokenizer=None, image_size=224, add_end_of_utterance_token=None, **kwargs):
super().__init__(image_processor, tokenizer)
self.image_token_id = (
tokenizer.image_token_id
if hasattr(tokenizer, "image_token")
else tokenizer.convert_tokens_to_ids(IMAGE_TOKEN)
)
self.default_image_dims = (
self.image_processor.image_num_channels,
self.image_processor.image_size,
self.image_processor.image_size,
)
self.tokenizer_was_trained_with_end_of_utterance_token = (
"<end_of_utterance>" in self.tokenizer.special_tokens_map.get("additional_special_tokens", [])
)
def __call__(
self,
images: Union[ImageInput, list[ImageInput], str, list[str], list[list[str]]] = None,
text: Union[
TextInput,
PreTokenizedInput,
list[TextInput],
list[PreTokenizedInput],
list[list[TextInput]],
list[list[PreTokenizedInput]],
] = None,
**kwargs: Unpack[IdeficsProcessorKwargs],
) -> BatchFeature:
"""This method takes batched or non-batched prompts made of text and images and converts them into prompts that
the model was trained on and prepares the image pixel values for the model to process.
Args:
images (`Union[ImageInput, list[ImageInput], str, list[str], list[list[str]]]`):
either a single image or a batched list of images - can be passed in when text contains only text prompts,
in order to use the image-text-to-text behavior.
text (`Union[list[TextInput], [list[list[TextInput]]]]`):
either a single prompt or a batched list of prompts - see the detailed description immediately after
the end of the arguments doc section.
return_tensors (`str` or `TensorType`, *optional*, defaults to `TensorType.PYTORCH`):
The type of tensors to return. Can be one of:
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
Returns:
a dict with entries: `input_ids`, `attention_mask`, `pixel_values`, `image_attention_mask` which can be
directly passed to `model.generate`
Detailed explanation:
Each entry in `text` is either a text to be passed as is or an image that will be processed.
An image can be either an image object (`PIL.Image`) or a url from which the image can be retrieved.
When the processor encounters an image it'll inject `<fake_token_around_image><image><fake_token_around_image>`
entry into the prompt.
Example:
```python
checkpoint = "HuggingFaceM4/idefics-9b"
processor = AutoProcessor.from_pretrained(checkpoint)
url = "https://hips.hearstapps.com/hmg-prod/images/cute-photos-of-cats-in-grass-1593184777.jpg"
img = processor.image_processor.fetch_images([url])[0]
prompts = [
"User:",
img,
"Describe this image.\nAssistant: An image of two kittens in grass.\n",
"User:",
"https://hips.hearstapps.com/hmg-prod/images/dog-puns-1581708208.jpg",
"Describe this image.\nAssistant:",
]
inputs = processor(text=prompts, return_tensors="pt")
generated_ids = model.generate(**inputs, max_length=100)
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
```
In this example the `prompts` will be converted into:
```
<s>User:<fake_token_around_image><image><fake_token_around_image>Describe this image.
Assistant: An image of two kittens in grass.
User:<fake_token_around_image><image><fake_token_around_image>Describe this image.
Assistant:'
```
and the two images will be massaged using [`IdeficsImageProcessor.__call__`] method and placed inside the
`pixel_values` dict entry of the return value.
This example also exemplifies that images can be passed as objects or as text urls. It can be seen that the
first image is passed as object and the second one as a url.
To do training do:
```python
image_transform = transforms.Compose(
[
transforms.RandomResizedCrop(
(w, h), scale=(0.9, 1.0), interpolation=transforms.InterpolationMode.BICUBIC
),
transforms.ToTensor(),
transforms.Normalize(mean=self.image_mean, std=self.image_std),
]
)
inputs = processor(text=prompts, transform=image_transform, return_tensors="pt")
```
In order to help debug prompt generation enable `debug=True` which will show you what's happening.
"""
if images is None and text is None:
raise ValueError("You need to specify either `text` or `images` and `text`.")
if images is None:
# assuming the user wants to use the old behavior with prompts as the only argument
prompts = text
elif text is not None:
# Assuming image-text-to-text behavior:
# Check if batched images are provided
if not isinstance(images, (list, tuple)):
images = [images]
if isinstance(text, str):
text = [text]
# Check if batched images and text are in the correct format
if isinstance(text, (list, tuple)) and len(text) != len(images):
raise ValueError(
"When providing both images and text arguments, the number of text prompts should be the same as the number of images."
"If you want to have several images per prompt, images should be nested as such: images=[[img1, img2], [img3, img4], ...] for text=[prompt1, prompt2, ...]."
)
# Check that only text is present in the prompts
if not all(isinstance(i, str) for i in text):
raise ValueError("When using the image-text-to-text behavior, the prompts should only contain text.")
if isinstance(images[0], (list, tuple)):
# if nested images, un-nest each sublist and create `prompts`
prompts = [[sample, *image_list] for image_list, sample in zip(images, text)]
else:
prompts = list(zip(images, text))
output_kwargs = self._merge_kwargs(
IdeficsProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
add_eos_token = output_kwargs["text_kwargs"].pop("add_eos_token", False)
add_end_of_utterance_token = output_kwargs["text_kwargs"].pop("add_end_of_utterance_token", None)
# if the value isn't overridden by the user, check if the tokenizer was trained with this token and then use it
if add_end_of_utterance_token is None:
add_end_of_utterance_token = self.tokenizer_was_trained_with_end_of_utterance_token
# turn non-batched prompts into batched
if not any(isinstance(i, (list, tuple)) for i in prompts):
prompts = [prompts]
fake_token = "<fake_token_around_image>"
image_token = "<image>"
end_of_utterance_token = "<end_of_utterance>"
def image_tokens(last_was_image):
if last_was_image:
return image_token + fake_token
else:
return fake_token + image_token + fake_token
all_prompts = []
all_images = []
for sample in prompts:
# the model was trained on samples starting with <s>
full_text = f"{self.tokenizer.bos_token}"
# an image can either be an image object in the item or the url, everything else is a verbatim prompt text
image_objects = []
last_was_image = False
last_was_text = False
for i, item in enumerate(sample):
if i > 0:
last_was_text = bool(not last_was_image)
if isinstance(item, str):
item = item.strip(" ")
if is_url(item):
image = self.image_processor.fetch_images(item)
full_text += image_tokens(last_was_image)
image_objects.append(image)
last_was_image = True
else:
# we add end_of_utterance_token between each subsequent text prompts (but not at the last one!)
if add_end_of_utterance_token and last_was_text:
full_text += end_of_utterance_token
full_text += item
last_was_image = False
else:
# must be an image obj
full_text += image_tokens(last_was_image)
image_objects.append(item)
last_was_image = True
if add_eos_token:
full_text += self.tokenizer.eos_token
image_objects = self.image_processor(image_objects, **output_kwargs["images_kwargs"])
all_prompts.append(full_text)
all_images.append(image_objects)
# For BC
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", "pt")
text_encoding = self.tokenizer(all_prompts, **output_kwargs["text_kwargs"])
all_texts = text_encoding["input_ids"]
all_attention_masks = text_encoding["attention_mask"]
# max_num_images has to be at least 1 even when there are no images
max_num_images = max(len(x) for x in all_images)
max_num_images = max(1, max_num_images)
at_least_one_image = sum(len(x) for x in all_images) > 0
output_input_ids = []
output_images = []
output_attention_masks = []
for text_single, attention_mask, extracted_images in zip(all_texts, all_attention_masks, all_images):
padded_input_ids = text_single
image_count = padded_input_ids.count(self.image_token_id)
local_max_num_images = min(image_count, max_num_images)
current_images = extracted_images[:local_max_num_images]
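            # Zero-pad up to max_num_images so per-sample image tensors can be stacked.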
if len(current_images) > 0:
if return_tensors == "pt":
padded_image_tensor = torch.zeros(max_num_images, *current_images.size()[1:])
padded_image_tensor[: current_images.size(0)] = current_images
else:
if return_tensors == "pt":
padded_image_tensor = torch.zeros(max_num_images, *self.default_image_dims)
output_images.append(padded_image_tensor)
if return_tensors == "pt":
output_input_ids.append(torch.tensor(padded_input_ids))
output_attention_masks.append(torch.tensor(attention_mask))
if return_tensors == "pt":
output_input_ids = torch.stack(output_input_ids)
output_images = torch.stack(output_images)
output_attention_masks = torch.stack(output_attention_masks)
if at_least_one_image:
image_attention_mask, _ = image_attention_mask_for_packed_input_ids(
output_input_ids, self.tokenizer, return_tensors
)
image_attention_mask = incremental_to_binary_attention_mask(
image_attention_mask, return_tensors, num_classes=max_num_images
)
else:
# in full language mode we set the image mask to all-0s
if return_tensors == "pt":
image_attention_mask = torch.zeros(
output_input_ids.shape[0], output_input_ids.shape[1], 1, dtype=torch.bool
)
return BatchFeature(
data={
"input_ids": output_input_ids,
"attention_mask": output_attention_masks,
"pixel_values": output_images,
"image_attention_mask": image_attention_mask,
}
)
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
return list(tokenizer_input_names + image_processor_input_names + ["image_attention_mask"])
__all__ = ["IdeficsProcessor"]
|
IdeficsProcessor
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/transfers/test_cassandra_to_gcs.py
|
{
"start": 1148,
"end": 5495
}
|
class ____:
@pytest.mark.db_test
def test_execute(self):
test_bucket = TEST_BUCKET
schema = SCHEMA
filename = FILENAME
gzip = True
query_timeout = 20
try:
from airflow.providers.google.cloud.transfers.cassandra_to_gcs import CassandraToGCSOperator
except cassandra.DependencyException:
pytest.skip("cassandra-driver not installed with libev support. Skipping test.")
with (
mock.patch(
"airflow.providers.google.cloud.transfers.cassandra_to_gcs.NamedTemporaryFile"
) as mock_tempfile,
mock.patch(
"airflow.providers.google.cloud.transfers.cassandra_to_gcs.GCSHook.upload"
) as mock_upload,
mock.patch(
"airflow.providers.google.cloud.transfers.cassandra_to_gcs.CassandraHook"
) as mock_hook,
):
mock_tempfile.return_value.name = TMP_FILE_NAME
operator = CassandraToGCSOperator(
task_id=TASK_ID,
cql=CQL,
bucket=test_bucket,
filename=filename,
schema_filename=schema,
gzip=gzip,
query_timeout=query_timeout,
)
operator.execute(None)
mock_hook.return_value.get_conn.assert_called_once_with()
mock_hook.return_value.get_conn.return_value.execute.assert_called_once_with(
"select * from keyspace1.table1",
timeout=20,
)
call_schema = call(
bucket_name=test_bucket,
object_name=schema,
filename=TMP_FILE_NAME,
mime_type="application/json",
gzip=gzip,
)
call_data = call(
bucket_name=test_bucket,
object_name=filename,
filename=TMP_FILE_NAME,
mime_type="application/json",
gzip=gzip,
)
mock_upload.assert_has_calls([call_schema, call_data], any_order=True)
def test_convert_value(self):
try:
from airflow.providers.google.cloud.transfers.cassandra_to_gcs import CassandraToGCSOperator
except cassandra.DependencyException:
pytest.skip("cassandra-driver not installed with libev support. Skipping test.")
op = CassandraToGCSOperator(task_id=TASK_ID, bucket=TEST_BUCKET, cql=CQL, filename=FILENAME)
unencoded_uuid_op = CassandraToGCSOperator(
task_id=TASK_ID, bucket=TEST_BUCKET, cql=CQL, filename=FILENAME, encode_uuid=False
)
assert op.convert_value(None) is None
assert op.convert_value(1) == 1
assert op.convert_value(1.0) == 1.0
assert op.convert_value("text") == "text"
assert op.convert_value(True) is True
assert op.convert_value({"a": "b"}) == {"a": "b"}
from datetime import datetime
now = datetime.now()
assert op.convert_value(now) == str(now)
from cassandra.util import Date
date_str = "2018-01-01"
date = Date(date_str)
assert op.convert_value(date) == str(date_str)
import uuid
from base64 import b64encode
test_uuid = uuid.uuid4()
encoded_uuid = b64encode(test_uuid.bytes).decode("ascii")
assert op.convert_value(test_uuid) == encoded_uuid
unencoded_uuid = str(test_uuid)
assert unencoded_uuid_op.convert_value(test_uuid) == unencoded_uuid
byte_str = b"abc"
encoded_b = b64encode(byte_str).decode("ascii")
assert op.convert_value(byte_str) == encoded_b
from decimal import Decimal
decimal = Decimal(1.0)
assert op.convert_value(decimal) == float(decimal)
from cassandra.util import Time
time = Time(0)
assert op.convert_value(time) == "00:00:00"
date_str_lst = ["2018-01-01", "2018-01-02", "2018-01-03"]
date_lst = [Date(d) for d in date_str_lst]
assert op.convert_value(date_lst) == date_str_lst
date_tpl = tuple(date_lst)
assert op.convert_value(date_tpl) == {
"field_0": "2018-01-01",
"field_1": "2018-01-02",
"field_2": "2018-01-03",
}
|
TestCassandraToGCS
|
python
|
getsentry__sentry-python
|
sentry_sdk/consts.py
|
{
"start": 24547,
"end": 27523
}
|
class ____:
ANTHROPIC_MESSAGES_CREATE = "ai.messages.create.anthropic"
CACHE_GET = "cache.get"
CACHE_PUT = "cache.put"
COHERE_CHAT_COMPLETIONS_CREATE = "ai.chat_completions.create.cohere"
COHERE_EMBEDDINGS_CREATE = "ai.embeddings.create.cohere"
DB = "db"
DB_REDIS = "db.redis"
EVENT_DJANGO = "event.django"
FUNCTION = "function"
FUNCTION_AWS = "function.aws"
FUNCTION_GCP = "function.gcp"
GEN_AI_CHAT = "gen_ai.chat"
GEN_AI_CREATE_AGENT = "gen_ai.create_agent"
GEN_AI_EMBEDDINGS = "gen_ai.embeddings"
GEN_AI_EXECUTE_TOOL = "gen_ai.execute_tool"
GEN_AI_GENERATE_TEXT = "gen_ai.generate_text"
GEN_AI_HANDOFF = "gen_ai.handoff"
GEN_AI_PIPELINE = "gen_ai.pipeline"
GEN_AI_INVOKE_AGENT = "gen_ai.invoke_agent"
GEN_AI_RESPONSES = "gen_ai.responses"
GRAPHQL_EXECUTE = "graphql.execute"
GRAPHQL_MUTATION = "graphql.mutation"
GRAPHQL_PARSE = "graphql.parse"
GRAPHQL_RESOLVE = "graphql.resolve"
GRAPHQL_SUBSCRIPTION = "graphql.subscription"
GRAPHQL_QUERY = "graphql.query"
GRAPHQL_VALIDATE = "graphql.validate"
GRPC_CLIENT = "grpc.client"
GRPC_SERVER = "grpc.server"
HTTP_CLIENT = "http.client"
HTTP_CLIENT_STREAM = "http.client.stream"
HTTP_SERVER = "http.server"
MIDDLEWARE_DJANGO = "middleware.django"
MIDDLEWARE_LITESTAR = "middleware.litestar"
MIDDLEWARE_LITESTAR_RECEIVE = "middleware.litestar.receive"
MIDDLEWARE_LITESTAR_SEND = "middleware.litestar.send"
MIDDLEWARE_STARLETTE = "middleware.starlette"
MIDDLEWARE_STARLETTE_RECEIVE = "middleware.starlette.receive"
MIDDLEWARE_STARLETTE_SEND = "middleware.starlette.send"
MIDDLEWARE_STARLITE = "middleware.starlite"
MIDDLEWARE_STARLITE_RECEIVE = "middleware.starlite.receive"
MIDDLEWARE_STARLITE_SEND = "middleware.starlite.send"
HUGGINGFACE_HUB_CHAT_COMPLETIONS_CREATE = (
"ai.chat_completions.create.huggingface_hub"
)
QUEUE_PROCESS = "queue.process"
QUEUE_PUBLISH = "queue.publish"
QUEUE_SUBMIT_ARQ = "queue.submit.arq"
QUEUE_TASK_ARQ = "queue.task.arq"
QUEUE_SUBMIT_CELERY = "queue.submit.celery"
QUEUE_TASK_CELERY = "queue.task.celery"
QUEUE_TASK_RQ = "queue.task.rq"
QUEUE_SUBMIT_HUEY = "queue.submit.huey"
QUEUE_TASK_HUEY = "queue.task.huey"
QUEUE_SUBMIT_RAY = "queue.submit.ray"
QUEUE_TASK_RAY = "queue.task.ray"
QUEUE_TASK_DRAMATIQ = "queue.task.dramatiq"
SUBPROCESS = "subprocess"
SUBPROCESS_WAIT = "subprocess.wait"
SUBPROCESS_COMMUNICATE = "subprocess.communicate"
TEMPLATE_RENDER = "template.render"
VIEW_RENDER = "view.render"
VIEW_RESPONSE_RENDER = "view.response.render"
WEBSOCKET_SERVER = "websocket.server"
SOCKET_CONNECTION = "socket.connection"
SOCKET_DNS = "socket.dns"
MCP_SERVER = "mcp.server"
# This type exists to trick mypy and PyCharm into thinking `init` and `Client`
# take these arguments (even though they take opaque **kwargs)
|
OP
|
python
|
PrefectHQ__prefect
|
tests/server/models/deprecated/test_work_queues.py
|
{
"start": 17571,
"end": 28145
}
|
class ____:
async def setup_work_queues_and_deployment(
self, session, flow, flow_function, tags=[]
):
"""
Create combinations of work queues, and a deployment to make sure that query is working correctly.
Returns the ID of the deployment that was created and a random ID that was provided to work queues
for testing purposes.
"""
deployment = (
await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
name="My Deployment",
flow_id=flow.id,
tags=tags,
),
),
)
match_id = deployment[0].id
miss_id = uuid4()
tags = [ # "a" and "b" are matches and "y" and "z" are misses
[],
["a"],
["z"],
["a", "b"],
["a", "z"],
["y", "z"],
]
deployments = [
[],
[match_id],
[miss_id],
[match_id, miss_id],
]
# Generate all combinations of work queues
for t in tags:
for d in deployments:
await models.work_queues.create_work_queue(
session=session,
work_queue=schemas.actions.WorkQueueCreate(
name=f"{t}:{d}",
filter=schemas.core.QueueFilter(tags=t, deployment_ids=d),
),
)
# return the two IDs needed to compare results
return match_id, miss_id
async def assert_queues_found(self, session, deployment_id, desired_queues):
queues = await check_work_queues_for_deployment(
session=session, deployment_id=deployment_id
)
# default work queue for work pool is made without a filter
actual_queue_attrs = [
[q.filter.tags, q.filter.deployment_ids]
for q in queues
if q.name != "default"
]
for q in desired_queues:
assert q in actual_queue_attrs
async def test_object_not_found_error_raised(self, session):
with pytest.raises(ObjectNotFoundError):
await check_work_queues_for_deployment(
session=session, deployment_id=uuid4()
)
# NO TAG DEPLOYMENTS with no-tag queues
async def test_no_tag_picks_up_no_filter_q(self, session, flow, flow_function):
match_id, miss_id = await self.setup_work_queues_and_deployment(
session=session, flow=flow, flow_function=flow_function
)
match_q = [[[], []]]
await self.assert_queues_found(session, match_id, match_q)
async def test_no_tag_picks_up_no_tags_no_runners_with_id_match_q(
self, session, flow, flow_function
):
match_id, miss_id = await self.setup_work_queues_and_deployment(
session=session, flow=flow, flow_function=flow_function
)
match_q = [
[[], [match_id]],
[[], [match_id, miss_id]],
]
await self.assert_queues_found(session, match_id, match_q)
async def test_no_tag_picks_up_no_tags_no_id_with_runners_match(
self, session, flow, flow_function
):
match_id, miss_id = await self.setup_work_queues_and_deployment(
session=session, flow=flow, flow_function=flow_function
)
match_q = [
[[], []],
[[], []],
]
await self.assert_queues_found(session, match_id, match_q)
async def test_no_tag_picks_up_no_tags_with_id_and_runners_match(
self, session, flow, flow_function
):
match_id, miss_id = await self.setup_work_queues_and_deployment(
session=session, flow=flow, flow_function=flow_function
)
match_q = [
[[], [match_id]],
[[], [match_id, miss_id]],
[[], [match_id]],
[[], [match_id, miss_id]],
]
await self.assert_queues_found(session, match_id, match_q)
async def test_no_tag_picks_up_only_number_of_expected_queues(
self, session, flow, flow_function
):
match_id, miss_id = await self.setup_work_queues_and_deployment(
session=session, flow=flow, flow_function=flow_function
)
actual_queues = await check_work_queues_for_deployment(
session=session, deployment_id=match_id
)
connection_url = PREFECT_API_DATABASE_CONNECTION_URL.value()
dialect = get_dialect(connection_url)
if dialect.name == "postgresql":
assert len(actual_queues) == 3
else:
# sqlite picks up the default queue because it has no filter
assert len(actual_queues) == 4
# ONE TAG DEPLOYMENTS with no-tag queues
async def test_one_tag_picks_up_no_filter_q(self, session, flow, flow_function):
match_id, miss_id = await self.setup_work_queues_and_deployment(
session=session, flow=flow, flow_function=flow_function, tags=["a"]
)
match_q = [[[], []]]
await self.assert_queues_found(session, match_id, match_q)
async def test_one_tag_picks_up_no_tags_with_id_match_q(
self, session, flow, flow_function
):
match_id, miss_id = await self.setup_work_queues_and_deployment(
session=session, flow=flow, flow_function=flow_function, tags=["a"]
)
match_q = [
[[], [match_id]],
[[], [match_id, miss_id]],
]
await self.assert_queues_found(session, match_id, match_q)
# ONE TAG DEPLOYMENTS with one-tag queues
async def test_one_tag_picks_up_one_tag_q(self, session, flow, flow_function):
match_id, miss_id = await self.setup_work_queues_and_deployment(
session=session, flow=flow, flow_function=flow_function, tags=["a"]
)
match_q = [[["a"], []]]
await self.assert_queues_found(session, match_id, match_q)
async def test_one_tag_picks_up_one_tag_with_id_match_q(
self, session, flow, flow_function
):
match_id, miss_id = await self.setup_work_queues_and_deployment(
session=session, flow=flow, flow_function=flow_function, tags=["a"]
)
match_q = [
[["a"], [match_id]],
[["a"], [match_id, miss_id]],
]
await self.assert_queues_found(session, match_id, match_q)
async def test_one_tag_picks_up_only_number_of_expected_queues(
self, session, flow, flow_function
):
match_id, miss_id = await self.setup_work_queues_and_deployment(
session=session, flow=flow, flow_function=flow_function, tags=["a"]
)
actual_queues = await check_work_queues_for_deployment(
session=session, deployment_id=match_id
)
connection_url = PREFECT_API_DATABASE_CONNECTION_URL.value()
dialect = get_dialect(connection_url)
if dialect.name == "postgresql":
assert len(actual_queues) == 6
else:
# sqlite picks up the default queue because it has no filter
assert len(actual_queues) == 7
# TWO TAG DEPLOYMENTS with no-tag queues
async def test_two_tag_picks_up_no_filter_q(self, session, flow, flow_function):
match_id, miss_id = await self.setup_work_queues_and_deployment(
session=session, flow=flow, flow_function=flow_function, tags=["a", "b"]
)
match_q = [[[], []]]
await self.assert_queues_found(session, match_id, match_q)
async def test_two_tag_picks_up_no_tags_with_id_match_q(
self, session, flow, flow_function
):
match_id, miss_id = await self.setup_work_queues_and_deployment(
session=session, flow=flow, flow_function=flow_function, tags=["a", "b"]
)
match_q = [
[[], [match_id]],
[[], [match_id, miss_id]],
]
await self.assert_queues_found(session, match_id, match_q)
# TWO TAG DEPLOYMENTS with one-tag queues
async def test_two_tag_picks_up_one_tag_q(self, session, flow, flow_function):
match_id, miss_id = await self.setup_work_queues_and_deployment(
session=session, flow=flow, flow_function=flow_function, tags=["a", "b"]
)
match_q = [[["a"], []]]
await self.assert_queues_found(session, match_id, match_q)
async def test_two_tag_picks_up_one_tag_with_id_match_q(
self, session, flow, flow_function
):
match_id, miss_id = await self.setup_work_queues_and_deployment(
session=session, flow=flow, flow_function=flow_function, tags=["a", "b"]
)
match_q = [
[["a"], [match_id]],
[["a"], [match_id, miss_id]],
]
await self.assert_queues_found(session, match_id, match_q)
# TWO TAG DEPLOYMENTS with two-tag queues
async def test_two_tag_picks_up_two_tag_q(self, session, flow, flow_function):
match_id, miss_id = await self.setup_work_queues_and_deployment(
session=session, flow=flow, flow_function=flow_function, tags=["a", "b"]
)
match_q = [[["a", "b"], []]]
await self.assert_queues_found(session, match_id, match_q)
async def test_two_tag_picks_up_two_tag_with_id_match_q(
self, session, flow, flow_function
):
match_id, miss_id = await self.setup_work_queues_and_deployment(
session=session, flow=flow, flow_function=flow_function, tags=["a", "b"]
)
match_q = [
[["a", "b"], [match_id]],
[["a", "b"], [match_id, miss_id]],
]
await self.assert_queues_found(session, match_id, match_q)
async def test_two_tag_picks_up_only_number_of_expected_queues(
self, session, flow, flow_function
):
match_id, miss_id = await self.setup_work_queues_and_deployment(
session=session, flow=flow, flow_function=flow_function, tags=["a", "b"]
)
actual_queues = await check_work_queues_for_deployment(
session=session, deployment_id=match_id
)
connection_url = PREFECT_API_DATABASE_CONNECTION_URL.value()
dialect = get_dialect(connection_url)
if dialect.name == "postgresql":
assert len(actual_queues) == 9
else:
# sqlite picks up the default queue because it has no filter
assert len(actual_queues) == 10
|
TestCheckWorkQueuesForDeployment
|
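The fixture in the row above originally declared tags=[] as a default argument; the rewrite above switches it to tags=None. A minimal standalone sketch (not Prefect code) of why mutable default arguments are a pitfall in Python:

def append_bad(item, bucket=[]):
    # The default list is created once at function definition time,
    # so every call without an explicit bucket shares the same object.
    bucket.append(item)
    return bucket

def append_good(item, bucket=None):
    # A fresh list is created on each call when none is supplied.
    if bucket is None:
        bucket = []
    bucket.append(item)
    return bucket

print(append_bad(1))   # [1]
print(append_bad(2))   # [1, 2]  <- surprising shared state
print(append_good(1))  # [1]
print(append_good(2))  # [2]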
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/events.py
|
{
"start": 69636,
"end": 70692
}
|
class ____(Response):
"""
Response of events.delete_for_model endpoint.
:param deleted: Number of deleted events
:type deleted: bool
"""
_service = "events"
_action = "delete_for_model"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"deleted": {
"description": "Number of deleted events",
"type": ["boolean", "null"],
}
},
"type": "object",
}
def __init__(self, deleted: Optional[bool] = None, **kwargs: Any) -> None:
super(DeleteForModelResponse, self).__init__(**kwargs)
self.deleted = deleted
@schema_property("deleted")
def deleted(self) -> Optional[bool]:
return self._property_deleted
@deleted.setter
def deleted(self, value: Optional[bool]) -> None:
if value is None:
self._property_deleted = None
return
self.assert_isinstance(value, "deleted", (bool,))
self._property_deleted = value
|
DeleteForModelResponse
|
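The response class in the row above routes assignment through a validating property setter. A minimal generic sketch of the same pattern without the clearml base classes (the class name and error message here are illustrative):

class Response:
    def __init__(self, deleted=None):
        self.deleted = deleted  # routed through the setter below

    @property
    def deleted(self):
        return self._deleted

    @deleted.setter
    def deleted(self, value):
        # Accept None or bool; reject everything else at assignment time.
        if value is not None and not isinstance(value, bool):
            raise TypeError(
                f"deleted must be bool or None, got {type(value).__name__}"
            )
        self._deleted = value

r = Response(deleted=True)
print(r.deleted)  # True
r.deleted = None  # allowed; resets the property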
python
|
huggingface__transformers
|
src/transformers/cache_utils.py
|
{
"start": 62500,
"end": 62919
}
|
class ____(StaticCache):
def __init__(self, config: PreTrainedConfig, max_cache_len: int, *args, **kwargs):
logger.warning_once(
"`HybridChunkedCache` is deprecated and will be removed in version v4.59 "
"Use `StaticCache(...)` instead which will correctly infer the type of each layer."
)
super().__init__(config=config, max_cache_len=max_cache_len)
|
HybridChunkedCache
|
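The row above is a deprecation shim: the old class name survives as a thin subclass that warns and delegates to its replacement. A minimal generic sketch of the pattern using only the standard library (OldName and NewImpl are hypothetical names, not transformers classes):

import warnings

class NewImpl:
    def __init__(self, size: int):
        self.size = size

class OldName(NewImpl):
    """Deprecated alias kept for backward compatibility."""

    def __init__(self, size: int, *args, **kwargs):
        warnings.warn(
            "`OldName` is deprecated; use `NewImpl(...)` instead.",
            DeprecationWarning,
            stacklevel=2,  # point the warning at the caller, not this shim
        )
        super().__init__(size=size)

old = OldName(8)  # warns, then behaves exactly like NewImpl
print(old.size)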
python
|
huggingface__transformers
|
src/transformers/models/deepseek_vl_hybrid/modular_deepseek_vl_hybrid.py
|
{
"start": 5927,
"end": 5985
}
|
class ____(SamLayerNorm):
pass
|
DeepseekVLHybridLayerNorm
|
python
|
scikit-learn__scikit-learn
|
sklearn/utils/_available_if.py
|
{
"start": 155,
"end": 2945
}
|
class ____:
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if check(self) returns a falsy value. Note that if check raises an error,
this will also result in hasattr returning False.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, check, attribute_name):
self.fn = fn
self.check = check
self.attribute_name = attribute_name
# update the docstring of the descriptor
update_wrapper(self, fn)
def _check(self, obj, owner):
attr_err_msg = (
f"This {owner.__name__!r} has no attribute {self.attribute_name!r}"
)
try:
check_result = self.check(obj)
except Exception as e:
raise AttributeError(attr_err_msg) from e
if not check_result:
raise AttributeError(attr_err_msg)
def __get__(self, obj, owner=None):
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self._check(obj, owner=owner)
out = MethodType(self.fn, obj)
else:
# This makes it possible to use the decorated method as an unbound method,
# for instance when monkeypatching.
@wraps(self.fn)
def out(*args, **kwargs):
self._check(args[0], owner=owner)
return self.fn(*args, **kwargs)
return out
def available_if(check):
"""An attribute that is available only if check returns a truthy value.
Parameters
----------
check : callable
When passed the object with the decorated method, this should return
a truthy value if the attribute is available, and either return False
or raise an AttributeError if not available.
Returns
-------
callable
Callable makes the decorated method available if `check` returns
a truthy value, otherwise the decorated method is unavailable.
Examples
--------
>>> from sklearn.utils.metaestimators import available_if
>>> class HelloIfEven:
... def __init__(self, x):
... self.x = x
...
... def _x_is_even(self):
... return self.x % 2 == 0
...
... @available_if(_x_is_even)
... def say_hello(self):
... print("Hello")
...
>>> obj = HelloIfEven(1)
>>> hasattr(obj, "say_hello")
False
>>> obj.x = 2
>>> hasattr(obj, "say_hello")
True
>>> obj.say_hello()
Hello
"""
return lambda fn: _AvailableIfDescriptor(fn, check, attribute_name=fn.__name__)
|
_AvailableIfDescriptor
|
python
|
realpython__materials
|
build-a-blog-from-scratch-django/django-blog/blog/apps.py
|
{
"start": 36,
"end": 140
}
|
class ____(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "blog"
|
BlogConfig
|
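An AppConfig like the one above only takes effect once the app is listed in the project's INSTALLED_APPS. A minimal sketch of the settings entry (a hypothetical settings.py, not part of the row above):

# settings.py (sketch)
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "blog.apps.BlogConfig",  # registers the blog app via its AppConfig
]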
python
|
python__mypy
|
mypy/test/testsubtypes.py
|
{
"start": 316,
"end": 12426
}
|
class ____(Suite):
def setUp(self) -> None:
self.fx = TypeFixture(INVARIANT)
self.fx_contra = TypeFixture(CONTRAVARIANT)
self.fx_co = TypeFixture(COVARIANT)
def test_trivial_cases(self) -> None:
for simple in self.fx_co.a, self.fx_co.o, self.fx_co.b:
self.assert_subtype(simple, simple)
def test_instance_subtyping(self) -> None:
self.assert_strict_subtype(self.fx.a, self.fx.o)
self.assert_strict_subtype(self.fx.b, self.fx.o)
self.assert_strict_subtype(self.fx.b, self.fx.a)
self.assert_not_subtype(self.fx.a, self.fx.d)
self.assert_not_subtype(self.fx.b, self.fx.c)
def test_simple_generic_instance_subtyping_invariant(self) -> None:
self.assert_subtype(self.fx.ga, self.fx.ga)
self.assert_subtype(self.fx.hab, self.fx.hab)
self.assert_not_subtype(self.fx.ga, self.fx.g2a)
self.assert_not_subtype(self.fx.ga, self.fx.gb)
self.assert_not_subtype(self.fx.gb, self.fx.ga)
def test_simple_generic_instance_subtyping_covariant(self) -> None:
self.assert_subtype(self.fx_co.ga, self.fx_co.ga)
self.assert_subtype(self.fx_co.hab, self.fx_co.hab)
self.assert_not_subtype(self.fx_co.ga, self.fx_co.g2a)
self.assert_not_subtype(self.fx_co.ga, self.fx_co.gb)
self.assert_subtype(self.fx_co.gb, self.fx_co.ga)
def test_simple_generic_instance_subtyping_contravariant(self) -> None:
self.assert_subtype(self.fx_contra.ga, self.fx_contra.ga)
self.assert_subtype(self.fx_contra.hab, self.fx_contra.hab)
self.assert_not_subtype(self.fx_contra.ga, self.fx_contra.g2a)
self.assert_subtype(self.fx_contra.ga, self.fx_contra.gb)
self.assert_not_subtype(self.fx_contra.gb, self.fx_contra.ga)
def test_generic_subtyping_with_inheritance_invariant(self) -> None:
self.assert_subtype(self.fx.gsab, self.fx.gb)
self.assert_not_subtype(self.fx.gsab, self.fx.ga)
self.assert_not_subtype(self.fx.gsaa, self.fx.gb)
def test_generic_subtyping_with_inheritance_covariant(self) -> None:
self.assert_subtype(self.fx_co.gsab, self.fx_co.gb)
self.assert_subtype(self.fx_co.gsab, self.fx_co.ga)
self.assert_not_subtype(self.fx_co.gsaa, self.fx_co.gb)
def test_generic_subtyping_with_inheritance_contravariant(self) -> None:
self.assert_subtype(self.fx_contra.gsab, self.fx_contra.gb)
self.assert_not_subtype(self.fx_contra.gsab, self.fx_contra.ga)
self.assert_subtype(self.fx_contra.gsaa, self.fx_contra.gb)
def test_interface_subtyping(self) -> None:
self.assert_subtype(self.fx.e, self.fx.f)
self.assert_equivalent(self.fx.f, self.fx.f)
self.assert_not_subtype(self.fx.a, self.fx.f)
def test_generic_interface_subtyping(self) -> None:
# TODO make this work
fx2 = InterfaceTypeFixture()
self.assert_subtype(fx2.m1, fx2.gfa)
self.assert_not_subtype(fx2.m1, fx2.gfb)
self.assert_equivalent(fx2.gfa, fx2.gfa)
def test_basic_callable_subtyping(self) -> None:
self.assert_strict_subtype(
self.fx.callable(self.fx.o, self.fx.d), self.fx.callable(self.fx.a, self.fx.d)
)
self.assert_strict_subtype(
self.fx.callable(self.fx.d, self.fx.b), self.fx.callable(self.fx.d, self.fx.a)
)
self.assert_strict_subtype(
self.fx.callable(self.fx.a, UninhabitedType()), self.fx.callable(self.fx.a, self.fx.a)
)
self.assert_unrelated(
self.fx.callable(self.fx.a, self.fx.a, self.fx.a),
self.fx.callable(self.fx.a, self.fx.a),
)
def test_default_arg_callable_subtyping(self) -> None:
self.assert_strict_subtype(
self.fx.callable_default(1, self.fx.a, self.fx.d, self.fx.a),
self.fx.callable(self.fx.a, self.fx.d, self.fx.a),
)
self.assert_strict_subtype(
self.fx.callable_default(1, self.fx.a, self.fx.d, self.fx.a),
self.fx.callable(self.fx.a, self.fx.a),
)
self.assert_strict_subtype(
self.fx.callable_default(0, self.fx.a, self.fx.d, self.fx.a),
self.fx.callable_default(1, self.fx.a, self.fx.d, self.fx.a),
)
self.assert_unrelated(
self.fx.callable_default(1, self.fx.a, self.fx.d, self.fx.a),
self.fx.callable(self.fx.d, self.fx.d, self.fx.a),
)
self.assert_unrelated(
self.fx.callable_default(0, self.fx.a, self.fx.d, self.fx.a),
self.fx.callable_default(1, self.fx.a, self.fx.a, self.fx.a),
)
self.assert_unrelated(
self.fx.callable_default(1, self.fx.a, self.fx.a),
self.fx.callable(self.fx.a, self.fx.a, self.fx.a),
)
def test_var_arg_callable_subtyping_1(self) -> None:
self.assert_strict_subtype(
self.fx.callable_var_arg(0, self.fx.a, self.fx.a),
self.fx.callable_var_arg(0, self.fx.b, self.fx.a),
)
def test_var_arg_callable_subtyping_2(self) -> None:
self.assert_strict_subtype(
self.fx.callable_var_arg(0, self.fx.a, self.fx.a),
self.fx.callable(self.fx.b, self.fx.a),
)
def test_var_arg_callable_subtyping_3(self) -> None:
self.assert_strict_subtype(
self.fx.callable_var_arg(0, self.fx.a, self.fx.a), self.fx.callable(self.fx.a)
)
def test_var_arg_callable_subtyping_4(self) -> None:
self.assert_strict_subtype(
self.fx.callable_var_arg(1, self.fx.a, self.fx.d, self.fx.a),
self.fx.callable(self.fx.b, self.fx.a),
)
def test_var_arg_callable_subtyping_5(self) -> None:
self.assert_strict_subtype(
self.fx.callable_var_arg(0, self.fx.a, self.fx.d, self.fx.a),
self.fx.callable(self.fx.b, self.fx.a),
)
def test_var_arg_callable_subtyping_6(self) -> None:
self.assert_strict_subtype(
self.fx.callable_var_arg(0, self.fx.a, self.fx.f, self.fx.d),
self.fx.callable_var_arg(0, self.fx.b, self.fx.e, self.fx.d),
)
def test_var_arg_callable_subtyping_7(self) -> None:
self.assert_not_subtype(
self.fx.callable_var_arg(0, self.fx.b, self.fx.d),
self.fx.callable(self.fx.a, self.fx.d),
)
def test_var_arg_callable_subtyping_8(self) -> None:
self.assert_not_subtype(
self.fx.callable_var_arg(0, self.fx.b, self.fx.d),
self.fx.callable_var_arg(0, self.fx.a, self.fx.a, self.fx.d),
)
self.assert_subtype(
self.fx.callable_var_arg(0, self.fx.a, self.fx.d),
self.fx.callable_var_arg(0, self.fx.b, self.fx.b, self.fx.d),
)
def test_var_arg_callable_subtyping_9(self) -> None:
self.assert_not_subtype(
self.fx.callable_var_arg(0, self.fx.b, self.fx.b, self.fx.d),
self.fx.callable_var_arg(0, self.fx.a, self.fx.d),
)
self.assert_subtype(
self.fx.callable_var_arg(0, self.fx.a, self.fx.a, self.fx.d),
self.fx.callable_var_arg(0, self.fx.b, self.fx.d),
)
def test_type_callable_subtyping(self) -> None:
self.assert_subtype(self.fx.callable_type(self.fx.d, self.fx.a), self.fx.type_type)
self.assert_strict_subtype(
self.fx.callable_type(self.fx.d, self.fx.b), self.fx.callable(self.fx.d, self.fx.a)
)
self.assert_strict_subtype(
self.fx.callable_type(self.fx.a, self.fx.b), self.fx.callable(self.fx.a, self.fx.b)
)
def test_type_var_tuple(self) -> None:
self.assert_subtype(Instance(self.fx.gvi, []), Instance(self.fx.gvi, []))
self.assert_subtype(
Instance(self.fx.gvi, [self.fx.a, self.fx.b]),
Instance(self.fx.gvi, [self.fx.a, self.fx.b]),
)
self.assert_not_subtype(
Instance(self.fx.gvi, [self.fx.a, self.fx.b]),
Instance(self.fx.gvi, [self.fx.b, self.fx.a]),
)
self.assert_not_subtype(
Instance(self.fx.gvi, [self.fx.a, self.fx.b]), Instance(self.fx.gvi, [self.fx.a])
)
self.assert_subtype(
Instance(self.fx.gvi, [UnpackType(self.fx.ss)]),
Instance(self.fx.gvi, [UnpackType(self.fx.ss)]),
)
self.assert_not_subtype(
Instance(self.fx.gvi, [UnpackType(self.fx.ss)]),
Instance(self.fx.gvi, [UnpackType(self.fx.us)]),
)
self.assert_not_subtype(
Instance(self.fx.gvi, [UnpackType(self.fx.ss)]), Instance(self.fx.gvi, [])
)
self.assert_not_subtype(
Instance(self.fx.gvi, [UnpackType(self.fx.ss)]), Instance(self.fx.gvi, [self.fx.anyt])
)
def test_type_var_tuple_with_prefix_suffix(self) -> None:
self.assert_subtype(
Instance(self.fx.gvi, [self.fx.a, UnpackType(self.fx.ss)]),
Instance(self.fx.gvi, [self.fx.a, UnpackType(self.fx.ss)]),
)
self.assert_subtype(
Instance(self.fx.gvi, [self.fx.a, self.fx.b, UnpackType(self.fx.ss)]),
Instance(self.fx.gvi, [self.fx.a, self.fx.b, UnpackType(self.fx.ss)]),
)
self.assert_not_subtype(
Instance(self.fx.gvi, [self.fx.a, UnpackType(self.fx.ss)]),
Instance(self.fx.gvi, [self.fx.b, UnpackType(self.fx.ss)]),
)
self.assert_not_subtype(
Instance(self.fx.gvi, [self.fx.a, UnpackType(self.fx.ss)]),
Instance(self.fx.gvi, [self.fx.a, self.fx.b, UnpackType(self.fx.ss)]),
)
self.assert_subtype(
Instance(self.fx.gvi, [UnpackType(self.fx.ss), self.fx.a]),
Instance(self.fx.gvi, [UnpackType(self.fx.ss), self.fx.a]),
)
self.assert_not_subtype(
Instance(self.fx.gvi, [UnpackType(self.fx.ss), self.fx.a]),
Instance(self.fx.gvi, [UnpackType(self.fx.ss), self.fx.b]),
)
self.assert_not_subtype(
Instance(self.fx.gvi, [UnpackType(self.fx.ss), self.fx.a]),
Instance(self.fx.gvi, [UnpackType(self.fx.ss), self.fx.a, self.fx.b]),
)
self.assert_subtype(
Instance(self.fx.gvi, [self.fx.a, self.fx.b, UnpackType(self.fx.ss), self.fx.c]),
Instance(self.fx.gvi, [self.fx.a, self.fx.b, UnpackType(self.fx.ss), self.fx.c]),
)
self.assert_not_subtype(
Instance(self.fx.gvi, [self.fx.a, self.fx.b, UnpackType(self.fx.ss), self.fx.c]),
Instance(self.fx.gvi, [self.fx.a, UnpackType(self.fx.ss), self.fx.b, self.fx.c]),
)
def test_type_var_tuple_unpacked_variable_length_tuple(self) -> None:
self.assert_subtype(
Instance(self.fx.gvi, [self.fx.a, self.fx.a]),
Instance(self.fx.gvi, [UnpackType(Instance(self.fx.std_tuplei, [self.fx.a]))]),
)
def test_fallback_not_subtype_of_tuple(self) -> None:
self.assert_not_subtype(self.fx.a, TupleType([self.fx.b], fallback=self.fx.a))
# IDEA: Maybe add these test cases (they are tested pretty well in type
# checker tests already):
# * more interface subtyping test cases
# * more generic interface subtyping test cases
# * type variables
# * tuple types
# * None type
# * any type
# * generic function types
def assert_subtype(self, s: Type, t: Type) -> None:
assert is_subtype(s, t), f"{s} not subtype of {t}"
def assert_not_subtype(self, s: Type, t: Type) -> None:
assert not is_subtype(s, t), f"{s} subtype of {t}"
def assert_strict_subtype(self, s: Type, t: Type) -> None:
self.assert_subtype(s, t)
self.assert_not_subtype(t, s)
def assert_equivalent(self, s: Type, t: Type) -> None:
self.assert_subtype(s, t)
self.assert_subtype(t, s)
def assert_unrelated(self, s: Type, t: Type) -> None:
self.assert_not_subtype(s, t)
self.assert_not_subtype(t, s)
|
SubtypingSuite
|
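The suite above exercises invariant, covariant, and contravariant generic subtyping through mypy's internal fixtures. A minimal illustration of the same variance rules using only the typing module (standalone sketch, not mypy-internal code):

from typing import Generic, TypeVar

class Animal: ...
class Dog(Animal): ...

T_co = TypeVar("T_co", covariant=True)
T_contra = TypeVar("T_contra", contravariant=True)

class Producer(Generic[T_co]):
    # Covariant: Producer[Dog] is a subtype of Producer[Animal].
    def get(self) -> T_co: ...

class Consumer(Generic[T_contra]):
    # Contravariant: Consumer[Animal] is a subtype of Consumer[Dog].
    def accept(self, value: T_contra) -> None: ...

def take(p: Producer[Animal]) -> None: ...
def feed(c: Consumer[Dog]) -> None: ...

take(Producer[Dog]())     # accepted under covariance
feed(Consumer[Animal]())  # accepted under contravariance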
python
|
doocs__leetcode
|
solution/2600-2699/2614.Prime In Diagonal/Solution.py
|
{
"start": 0,
"end": 487
}
|
class ____:
def diagonalPrime(self, nums: List[List[int]]) -> int:
def is_prime(x: int) -> bool:
if x < 2:
return False
return all(x % i for i in range(2, int(sqrt(x)) + 1))
n = len(nums)
ans = 0
for i, row in enumerate(nums):
if is_prime(row[i]):
ans = max(ans, row[i])
if is_prime(row[n - i - 1]):
ans = max(ans, row[n - i - 1])
return ans
|
Solution
|
python
|
scipy__scipy
|
scipy/stats/_multivariate.py
|
{
"start": 148565,
"end": 149638
}
|
class ____(multi_rv_frozen):
__class_getitem__ = None
def __init__(self, dim=None, seed=None):
"""Create a frozen O(N) distribution.
Parameters
----------
dim : scalar
Dimension of matrices
seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Examples
--------
>>> from scipy.stats import ortho_group
>>> g = ortho_group(5)
>>> x = g.rvs()
""" # numpy/numpydoc#87 # noqa: E501
self._dist = ortho_group_gen(seed)
self.dim = self._dist._process_parameters(dim)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.dim, size, random_state)
|
ortho_group_frozen
|
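ortho_group_frozen above follows scipy's general frozen-distribution pattern: bind the distribution's parameters once, then call parameterless methods on the frozen object. A minimal usage sketch of the same pattern with scipy.stats.norm:

import numpy as np
from scipy.stats import norm

rng = np.random.default_rng(0)
frozen = norm(loc=2.0, scale=3.0)  # parameters bound at construction
sample = frozen.rvs(size=5, random_state=rng)
print(sample)
print(frozen.mean(), frozen.std())  # 2.0 3.0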
python
|
pytorch__pytorch
|
test/distributed/optim/test_zero_redundancy_optimizer.py
|
{
"start": 1823,
"end": 11298
}
|
class ____(TestZeroRedundancyOptimizer):
def test_state_dict(self):
"""Check that ZeroRedundancyOptimizer exposes the expected state dict
interface, irrespective of the sharding."""
self.create_pg(self.device)
LR1 = 0.1
LR2 = 0.01
MOMENTUM = 0.9
RECIPIENT_RANK = 0 # rank 0 is the only rank since the world size is 1
x = torch.tensor([1.0], device=self.device, requires_grad=True)
o = ZeroRedundancyOptimizer(
[x],
optimizer_class=SGD,
lr=LR1,
momentum=MOMENTUM,
)
x.backward()
o.step()
self.assertEqual(x, torch.tensor([0.9], device=self.device))
self.assertEqual(
o.optim.state[x]["momentum_buffer"],
torch.tensor([1.0], device=self.device),
)
o.zero_grad()
o.consolidate_state_dict(to=RECIPIENT_RANK)
state_dict = o.state_dict()
# Check that the state dict has keys compliant with PyTorch
self.assertIn("param_groups", state_dict.keys())
self.assertIn("state", state_dict.keys())
# Check that the state has the expected keys
self.assertEqual(state_dict["param_groups"][0]["lr"], 0.1)
self.assertEqual(state_dict["param_groups"][0]["momentum"], 0.9)
self.assertFalse(state_dict["param_groups"][0]["nesterov"])
self.assertEqual(state_dict["param_groups"][0]["weight_decay"], 0.0)
self.assertEqual(state_dict["param_groups"][0]["dampening"], 0.0)
# Check that the state and the `param_groups` attribute are in sync
for k in state_dict["param_groups"][0]:
if k != "params":
self.assertEqual(
state_dict["param_groups"][0][k],
o.param_groups[0][k],
)
# Check that the state is reloaded with the correct values and device
o = ZeroRedundancyOptimizer([x], optimizer_class=SGD, lr=LR2)
o.load_state_dict(state_dict)
self.assertEqual(
o.optim.state[x]["momentum_buffer"],
torch.tensor([1.0], device=self.device),
)
# We should be using `LR1` and not `LR2` after reloading, both within
# the optimizer and as exposed by the `param_groups` attribute
self.assertEqual(o.param_groups[0]["lr"], LR1)
x.backward()
o.step()
self.assertEqual(x, torch.tensor([0.71], device=self.device))
self.assertEqual(
o.optim.state[x]["momentum_buffer"],
torch.tensor([1.9], device=self.device),
)
# Check that the exposed `param_groups` are on the proper device
self.assertEqual(o.param_groups[0]["params"][0].device, x.device)
def test_lr_scheduler(self):
"""Check that a normal PyTorch ``lr_scheduler`` is usable with
ZeroRedundancyOptimizer."""
self.create_pg(self.device)
NUM_ITERS = 5
LR = 0.01
x = torch.tensor([1.0], device=self.device, requires_grad=True)
x2 = torch.tensor([1.0], device=self.device, requires_grad=True)
o = ZeroRedundancyOptimizer([x], optimizer_class=SGD, lr=LR)
o2 = torch.optim.SGD([x2], lr=LR)
s = torch.optim.lr_scheduler.StepLR(o, 1)
s2 = torch.optim.lr_scheduler.StepLR(o2, 1)
for _ in range(NUM_ITERS):
x.backward()
o.zero_grad()
o.step()
s.step()
x2.backward()
o2.zero_grad()
o2.step()
s2.step()
self.assertEqual(x, x2)
def test_step_with_kwargs(self):
"""Check that the ``step(**kwargs)`` interface is properly exposed."""
self.create_pg(self.device)
LR = 0.1
class SGDWithStepKWArg(torch.optim.SGD):
def step(self, closure=None, kwarg=None):
super().step()
kwarg.append(5)
kwarg: list[Any] = []
x = torch.tensor([1.0], device=self.device, requires_grad=True)
o = ZeroRedundancyOptimizer(
[x],
optimizer_class=SGDWithStepKWArg,
lr=LR,
)
x.backward()
o.step(0, kwarg=kwarg)
self.assertEqual(kwarg, [5])
self.assertEqual(x, torch.tensor([0.9], device=self.device))
def test_step_with_extra_inner_key(self):
"""Check that ZeroRedundancyOptimizer wrapping an optimizer that adds
extra keys to ``param_groups`` exposes those keys through ZeRO's own
``param_groups``."""
self.create_pg(self.device)
LR = 0.1
class SGDWithNewKey(torch.optim.SGD):
# Dummy optimizer which adds a new key to the param groups
def step(self, closure=None):
super().step()
self.param_groups[0]["new_key"] = 0.1
x = torch.tensor([1.0], device=self.device, requires_grad=True)
o = ZeroRedundancyOptimizer([x], optimizer_class=SGDWithNewKey, lr=LR)
x.backward()
o.step()
self.assertEqual(o.param_groups[0]["new_key"], 0.1)
self.assertEqual(x, torch.tensor([0.9], device=self.device))
def test_step_without_closure(self):
"""Check that the ``step()`` method (without closure) is handled as
expected."""
self.create_pg(self.device)
LR = 0.1
class SGDWithoutClosure(torch.optim.SGD):
def step(self):
return super().step()
x = torch.tensor([1.0], device=self.device, requires_grad=True)
o = ZeroRedundancyOptimizer(
[x],
optimizer_class=SGDWithoutClosure,
lr=LR,
)
x.backward()
o.step()
self.assertEqual(x, torch.tensor([0.9], device=self.device))
def test_zero_grad(self):
"""Check that the ``zero_grad`` method is properly handled."""
self.create_pg(self.device)
LR = 0.01
x = torch.rand(1)
m = torch.nn.Linear(1, 1)
o = ZeroRedundancyOptimizer(m.parameters(), optimizer_class=SGD, lr=LR)
y = m(x)
y.backward(x)
self.assertNotEqual(m.weight.grad, torch.zeros_like(m.weight))
self.assertNotEqual(m.bias.grad, torch.zeros_like(m.bias))
o.zero_grad()
self.assertIsNone(m.weight.grad)
self.assertIsNone(m.bias.grad)
def test_constructor(self):
"""Check the robustness of the ZeroRedundancyOptimizer constructor by
passing different values for the ``params`` argument."""
self.create_pg(self.device)
LR = 0.01
m = torch.nn.Sequential(
torch.nn.Linear(5, 10),
torch.nn.Linear(10, 10),
torch.nn.Linear(10, 10),
)
# Test various constructor inputs in the form: (input, expected error)
ctor_inputs = [
([], ValueError), # empty parameter list
(torch.randn(1), TypeError), # non-iterable: `torch.Tensor`
(1.2, TypeError), # non-iterable: `float`
(
[
{"params": [l.weight for l in m]},
{"params": [l.bias for l in m]},
],
None,
), # iterable of dict
(
list(m.parameters()) + [42],
TypeError,
), # iterable containing invalid type
(m.parameters(), None), # `params` as a generator
(list(m.parameters()), None), # `params` as a list
]
for ctor_input, error in ctor_inputs:
context = self.assertRaises(error) if error else nullcontext()
with context:
ZeroRedundancyOptimizer(
ctor_input,
optimizer_class=SGD,
lr=LR,
)
# Test constructing with multiple parameter groups more thoroughly
WD = 0.01
BETAS = (0.9, 0.999)
EPS = 1e-8
params = [
{"params": [l.weight for l in m], "weight_decay": 0.0},
{"params": [l.bias for l in m], "weight_decay": WD},
]
o = ZeroRedundancyOptimizer(
params,
optimizer_class=AdamW,
lr=LR,
betas=BETAS,
eps=EPS,
)
assert len(o.param_groups) == 2, (
f"Expected 2 ZeRO param groups, but got {len(o.param_groups)}"
)
assert len(o.optim.param_groups) == 2, (
"Expected 2 local optimizer param groups, but got "
f"{len(o.optim.param_groups)}"
)
def test_same_dense_param_type(self):
"""Check that ZeroRedundancyOptimizer raises an exception if the input
parameters include sparse tensors or different dense types.
NOTE: This test should be removed once support for sparse parameters
and varying parameter types is added.
"""
self.create_pg(self.device)
LR = 0.01
inputs = [
[torch.sparse_coo_tensor(size=(2, 3))],
[torch.FloatTensor(1), torch.DoubleTensor(1)],
[
torch.FloatTensor(1),
torch.FloatTensor(1),
torch.sparse_coo_tensor(size=(2, 3)),
],
]
for input in inputs:
with self.assertRaises(ValueError):
ZeroRedundancyOptimizer(input, optimizer_class=SGD, lr=LR)
|
TestZeroRedundancyOptimizerSingleRank
|
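The state-dict test in the row above relies on a round-trip contract that plain PyTorch optimizers also follow: state_dict() from one optimizer can be loaded into another, and the saved hyperparameters win over the constructor arguments. A minimal single-process sketch with torch.optim.SGD (no ZeroRedundancyOptimizer or process group required):

import torch
from torch.optim import SGD

x = torch.tensor([1.0], requires_grad=True)
opt = SGD([x], lr=0.1, momentum=0.9)
x.backward()
opt.step()  # x becomes 0.9 and a momentum buffer appears in opt.state

state = opt.state_dict()  # keys: "state" and "param_groups"

opt2 = SGD([x], lr=0.01)  # deliberately different lr
opt2.load_state_dict(state)
print(opt2.param_groups[0]["lr"])        # 0.1 -- the saved lr wins
print(opt2.param_groups[0]["momentum"])  # 0.9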