| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6-201) | class_span (dict) | source (stringlengths, 21-2.38M) | target (stringlengths, 1-96) |
|---|---|---|---|---|---|
python
|
apache__thrift
|
lib/py/src/protocol/TBase.py
|
{
"start": 828,
"end": 2042
}
|
class ____(object):
__slots__ = ()
def __repr__(self):
L = ['%s=%r' % (key, getattr(self, key)) for key in self.__slots__]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
for attr in self.__slots__:
my_val = getattr(self, attr)
other_val = getattr(other, attr)
if my_val != other_val:
return False
return True
def __ne__(self, other):
return not (self == other)
def read(self, iprot):
if (iprot._fast_decode is not None and
isinstance(iprot.trans, TTransport.CReadableTransport) and
self.thrift_spec is not None):
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
else:
iprot.readStruct(self, self.thrift_spec)
def write(self, oprot):
if (oprot._fast_encode is not None and self.thrift_spec is not None):
oprot.trans.write(
oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
else:
oprot.writeStruct(self, self.thrift_spec)
|
TBase
|
python
|
walkccc__LeetCode
|
solutions/973. K Closest Points to Origin/973-3.py
|
{
"start": 0,
"end": 935
}
|
class ____:
def kClosest(self, points: list[list[int]], k: int) -> list[list[int]]:
def squareDist(p: list[int]) -> int:
return p[0] * p[0] + p[1] * p[1]
def quickSelect(l: int, r: int, k: int) -> None:
randIndex = random.randint(0, r - l) + l
points[randIndex], points[r] = points[r], points[randIndex]
pivot = points[r]
nextSwapped = l
for i in range(l, r):
if squareDist(points[i]) <= squareDist(pivot):
points[nextSwapped], points[i] = points[i], points[nextSwapped]
nextSwapped += 1
points[nextSwapped], points[r] = points[r], points[nextSwapped]
count = nextSwapped - l + 1  # the number of points <= pivot
if count == k:
return
if count > k:
quickSelect(l, nextSwapped - 1, k)
else:
quickSelect(nextSwapped + 1, r, k - count)
quickSelect(0, len(points) - 1, k)
return points[0:k]
|
Solution
|
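A minimal usage sketch of the quickselect solution above, assuming `import random` and the `Solution` class are in scope (the sample input is illustrative, not part of the row):
points = [[1, 3], [-2, 2]]  # squared distances: 10 and 8
print(Solution().kClosest(points, k=1))  # -> [[-2, 2]]; order within the k results is not guaranteed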
python
|
scikit-learn__scikit-learn
|
sklearn/model_selection/_split.py
|
{
"start": 61821,
"end": 63871
}
|
class ____(_UnsupportedGroupCVMixin, _RepeatedSplits):
"""Repeated K-Fold cross validator.
Repeats K-Fold `n_repeats` times with different randomization in each repetition.
Read more in the :ref:`User Guide <repeated_k_fold>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : int, RandomState instance or None, default=None
Controls the randomness of each repeated cross-validation instance.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import RepeatedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> rkf = RepeatedKFold(n_splits=2, n_repeats=2, random_state=2652124)
>>> rkf.get_n_splits()
4
>>> print(rkf)
RepeatedKFold(n_repeats=2, n_splits=2, random_state=2652124)
>>> for i, (train_index, test_index) in enumerate(rkf.split(X)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
...
Fold 0:
Train: index=[0 1]
Test: index=[2 3]
Fold 1:
Train: index=[2 3]
Test: index=[0 1]
Fold 2:
Train: index=[1 2]
Test: index=[0 3]
Fold 3:
Train: index=[0 3]
Test: index=[1 2]
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
See Also
--------
RepeatedStratifiedKFold : Repeats Stratified K-Fold n times.
"""
def __init__(self, *, n_splits=5, n_repeats=10, random_state=None):
super().__init__(
KFold, n_repeats=n_repeats, random_state=random_state, n_splits=n_splits
)
|
RepeatedKFold
|
python
|
graphql-python__graphene
|
graphene/types/objecttype.py
|
{
"start": 1636,
"end": 5802
}
|
class ____(BaseType, metaclass=ObjectTypeMeta):
"""
Object Type Definition
Almost all of the GraphQL types you define will be object types. Object types
have a name, but most importantly describe their fields.
The name of the type defined by an _ObjectType_ defaults to the class name. The type
description defaults to the class docstring. This can be overridden by adding attributes
to a Meta inner class.
The class attributes of an _ObjectType_ are mounted as instances of ``graphene.Field``.
Methods starting with ``resolve_<field_name>`` are bound as resolvers of the matching Field
name. If no resolver is provided, the default resolver is used.
Ambiguous types with Interface and Union can be determined through ``is_type_of`` method and
``Meta.possible_types`` attribute.
.. code:: python
from graphene import ObjectType, String, Field
class Person(ObjectType):
class Meta:
description = 'A human'
# implicitly mounted as Field
first_name = String()
# explicitly mounted as Field
last_name = Field(String)
def resolve_last_name(parent, info):
return last_name
ObjectType must be mounted using ``graphene.Field``.
.. code:: python
from graphene import ObjectType, Field
class Query(ObjectType):
person = Field(Person, description="My favorite person")
Meta class options (optional):
name (str): Name of the GraphQL type (must be unique in schema). Defaults to class
name.
description (str): Description of the GraphQL type in the schema. Defaults to class
docstring.
interfaces (Iterable[graphene.Interface]): GraphQL interfaces to extend with this object.
all fields from interface will be included in this object's schema.
possible_types (Iterable[class]): Used to test parent value object via isinstance to see if
this type can be used to resolve an ambiguous type (interface, union).
default_resolver (any Callable resolver): Override the default resolver for this
type. Defaults to graphene default resolver which returns an attribute or dictionary
key with the same name as the field.
fields (Dict[str, graphene.Field]): Dictionary of field name to Field. Not recommended to
use (prefer class attributes).
An _ObjectType_ can be used as a simple value object by creating an instance of the class.
.. code:: python
p = Person(first_name='Bob', last_name='Roberts')
assert p.first_name == 'Bob'
Args:
*args (List[Any]): Positional values to use for Field values of value object
**kwargs (Dict[str: Any]): Keyword arguments to use for Field values of value object
"""
@classmethod
def __init_subclass_with_meta__(
cls,
interfaces=(),
possible_types=(),
default_resolver=None,
_meta=None,
**options,
):
if not _meta:
_meta = ObjectTypeOptions(cls)
fields = {}
for interface in interfaces:
assert issubclass(
interface, Interface
), f'All interfaces of {cls.__name__} must be a subclass of Interface. Received "{interface}".'
fields.update(interface._meta.fields)
for base in reversed(cls.__mro__):
fields.update(yank_fields_from_attrs(base.__dict__, _as=Field))
assert not (possible_types and cls.is_type_of), (
f"{cls.__name__}.Meta.possible_types will cause type collision with {cls.__name__}.is_type_of. "
"Please use one or other."
)
if _meta.fields:
_meta.fields.update(fields)
else:
_meta.fields = fields
if not _meta.interfaces:
_meta.interfaces = interfaces
_meta.possible_types = possible_types
_meta.default_resolver = default_resolver
super(ObjectType, cls).__init_subclass_with_meta__(_meta=_meta, **options)
is_type_of = None
|
ObjectType
|
python
|
walkccc__LeetCode
|
solutions/2058. Find the Minimum and Maximum Number of Nodes Between Critical Points/2058.py
|
{
"start": 0,
"end": 781
}
|
class ____:
def nodesBetweenCriticalPoints(self, head: ListNode | None) -> list[int]:
minDistance = math.inf
firstMaIndex = -1
prevMaIndex = -1
index = 1
prev = head # Point to the index 0.
curr = head.next # Point to the index 1.
while curr.next:
if (curr.val > prev.val and curr.val > curr.next.val or
curr.val < prev.val and curr.val < curr.next.val):
if firstMaIndex == -1: # Only assign once.
firstMaIndex = index
if prevMaIndex != -1:
minDistance = min(minDistance, index - prevMaIndex)
prevMaIndex = index
prev = curr
curr = curr.next
index += 1
if minDistance == math.inf:
return [-1, -1]
return [minDistance, prevMaIndex - firstMaIndex]
|
Solution
|
python
|
ray-project__ray
|
python/ray/data/expressions.py
|
{
"start": 6544,
"end": 17112
}
|
class ____(ABC):
"""Base class for all expression nodes.
This is the abstract base class that all expression types inherit from.
It provides operator overloads for building complex expressions using
standard Python operators.
Expressions form a tree structure where each node represents an operation
or value. The tree can be evaluated against data batches to compute results.
Example:
>>> from ray.data.expressions import col, lit
>>> # Create an expression tree: (col("x") + 5) * col("y")
>>> expr = (col("x") + lit(5)) * col("y")
>>> # This creates a BinaryExpr with operation=MUL
>>> # left=BinaryExpr(op=ADD, left=ColumnExpr("x"), right=LiteralExpr(5))
>>> # right=ColumnExpr("y")
Note:
This class should not be instantiated directly. Use the concrete
subclasses like ColumnExpr, LiteralExpr, etc.
"""
data_type: DataType
@property
def name(self) -> str | None:
"""Get the name associated with this expression.
Returns:
The name for expressions that have one (ColumnExpr, AliasExpr),
None otherwise.
"""
return None
@abstractmethod
def structurally_equals(self, other: Any) -> bool:
"""Compare two expression ASTs for structural equality."""
raise NotImplementedError
def to_pyarrow(self) -> "pyarrow.compute.Expression":
"""Convert this Ray Data expression to a PyArrow compute expression.
Returns:
A PyArrow compute expression equivalent to this Ray Data expression.
Raises:
ValueError: If the expression contains operations not supported by PyArrow.
TypeError: If the expression type cannot be converted to PyArrow.
"""
return _PyArrowExpressionVisitor().visit(self)
def __repr__(self) -> str:
"""Return a tree-structured string representation of the expression.
Returns:
A multi-line string showing the expression tree structure using
box-drawing characters for visual clarity.
Example:
>>> from ray.data.expressions import col, lit
>>> expr = (col("x") + lit(5)) * col("y")
>>> print(expr)
MUL
├── left: ADD
│   ├── left: COL('x')
│   └── right: LIT(5)
└── right: COL('y')
"""
from ray.data._internal.planner.plan_expression.expression_visitors import (
_TreeReprVisitor,
)
return _TreeReprVisitor().visit(self)
def _bin(self, other: Any, op: Operation) -> "Expr":
"""Create a binary expression with the given operation.
Args:
other: The right operand expression or literal value
op: The operation to perform
Returns:
A new BinaryExpr representing the operation
Note:
If other is not an Expr, it will be automatically converted to a LiteralExpr.
"""
if not isinstance(other, Expr):
other = LiteralExpr(other)
return BinaryExpr(op, self, other)
# arithmetic
def __add__(self, other: Any) -> "Expr":
"""Addition operator (+)."""
return self._bin(other, Operation.ADD)
def __radd__(self, other: Any) -> "Expr":
"""Reverse addition operator (for literal + expr)."""
return LiteralExpr(other)._bin(self, Operation.ADD)
def __sub__(self, other: Any) -> "Expr":
"""Subtraction operator (-)."""
return self._bin(other, Operation.SUB)
def __rsub__(self, other: Any) -> "Expr":
"""Reverse subtraction operator (for literal - expr)."""
return LiteralExpr(other)._bin(self, Operation.SUB)
def __mul__(self, other: Any) -> "Expr":
"""Multiplication operator (*)."""
return self._bin(other, Operation.MUL)
def __rmul__(self, other: Any) -> "Expr":
"""Reverse multiplication operator (for literal * expr)."""
return LiteralExpr(other)._bin(self, Operation.MUL)
def __truediv__(self, other: Any) -> "Expr":
"""Division operator (/)."""
return self._bin(other, Operation.DIV)
def __rtruediv__(self, other: Any) -> "Expr":
"""Reverse division operator (for literal / expr)."""
return LiteralExpr(other)._bin(self, Operation.DIV)
def __floordiv__(self, other: Any) -> "Expr":
"""Floor division operator (//)."""
return self._bin(other, Operation.FLOORDIV)
def __rfloordiv__(self, other: Any) -> "Expr":
"""Reverse floor division operator (for literal // expr)."""
return LiteralExpr(other)._bin(self, Operation.FLOORDIV)
# comparison
def __gt__(self, other: Any) -> "Expr":
"""Greater than operator (>)."""
return self._bin(other, Operation.GT)
def __lt__(self, other: Any) -> "Expr":
"""Less than operator (<)."""
return self._bin(other, Operation.LT)
def __ge__(self, other: Any) -> "Expr":
"""Greater than or equal operator (>=)."""
return self._bin(other, Operation.GE)
def __le__(self, other: Any) -> "Expr":
"""Less than or equal operator (<=)."""
return self._bin(other, Operation.LE)
def __eq__(self, other: Any) -> "Expr":
"""Equality operator (==)."""
return self._bin(other, Operation.EQ)
def __ne__(self, other: Any) -> "Expr":
"""Not equal operator (!=)."""
return self._bin(other, Operation.NE)
# boolean
def __and__(self, other: Any) -> "Expr":
"""Logical AND operator (&)."""
return self._bin(other, Operation.AND)
def __or__(self, other: Any) -> "Expr":
"""Logical OR operator (|)."""
return self._bin(other, Operation.OR)
def __invert__(self) -> "Expr":
"""Logical NOT operator (~)."""
return UnaryExpr(Operation.NOT, self)
# predicate methods
def is_null(self) -> "Expr":
"""Check if the expression value is null."""
return UnaryExpr(Operation.IS_NULL, self)
def is_not_null(self) -> "Expr":
"""Check if the expression value is not null."""
return UnaryExpr(Operation.IS_NOT_NULL, self)
def is_in(self, values: Union[List[Any], "Expr"]) -> "Expr":
"""Check if the expression value is in a list of values."""
if not isinstance(values, Expr):
values = LiteralExpr(values)
return self._bin(values, Operation.IN)
def not_in(self, values: Union[List[Any], "Expr"]) -> "Expr":
"""Check if the expression value is not in a list of values."""
if not isinstance(values, Expr):
values = LiteralExpr(values)
return self._bin(values, Operation.NOT_IN)
def alias(self, name: str) -> "Expr":
"""Rename the expression.
This method allows you to assign a new name to an expression result.
This is particularly useful when you want to specify the output column name
directly within the expression rather than as a separate parameter.
Args:
name: The new name for the expression
Returns:
An AliasExpr that wraps this expression with the specified name
Example:
>>> from ray.data.expressions import col, lit
>>> # Create an expression with a new aliased name
>>> expr = (col("price") * col("quantity")).alias("total")
>>> # Can be used with Dataset operations that support named expressions
"""
return AliasExpr(
data_type=self.data_type, expr=self, _name=name, _is_rename=False
)
@property
def list(self) -> "_ListNamespace":
"""Access list operations for this expression.
Returns:
A _ListNamespace that provides list-specific operations.
Example:
>>> from ray.data.expressions import col
>>> import ray
>>> ds = ray.data.from_items([
... {"items": [1, 2, 3]},
... {"items": [4, 5]}
... ])
>>> ds = ds.with_column("num_items", col("items").list.len())
>>> ds = ds.with_column("first_item", col("items").list[0])
>>> ds = ds.with_column("slice", col("items").list[1:3])
"""
from ray.data.namespace_expressions.list_namespace import _ListNamespace
return _ListNamespace(self)
@property
def str(self) -> "_StringNamespace":
"""Access string operations for this expression.
Returns:
A _StringNamespace that provides string-specific operations.
Example:
>>> from ray.data.expressions import col
>>> import ray
>>> ds = ray.data.from_items([
... {"name": "Alice"},
... {"name": "Bob"}
... ])
>>> ds = ds.with_column("upper_name", col("name").str.upper())
>>> ds = ds.with_column("name_len", col("name").str.len())
>>> ds = ds.with_column("starts_a", col("name").str.starts_with("A"))
"""
from ray.data.namespace_expressions.string_namespace import _StringNamespace
return _StringNamespace(self)
@property
def struct(self) -> "_StructNamespace":
"""Access struct operations for this expression.
Returns:
A _StructNamespace that provides struct-specific operations.
Example:
>>> from ray.data.expressions import col
>>> import ray
>>> import pyarrow as pa
>>> ds = ray.data.from_arrow(pa.table({
... "user": pa.array([
... {"name": "Alice", "age": 30}
... ], type=pa.struct([
... pa.field("name", pa.string()),
... pa.field("age", pa.int32())
... ]))
... }))
>>> ds = ds.with_column("age", col("user").struct["age"]) # doctest: +SKIP
"""
from ray.data.namespace_expressions.struct_namespace import _StructNamespace
return _StructNamespace(self)
@property
def dt(self) -> "_DatetimeNamespace":
"""Access datetime operations for this expression."""
from ray.data.namespace_expressions.dt_namespace import _DatetimeNamespace
return _DatetimeNamespace(self)
def _unalias(self) -> "Expr":
return self
@DeveloperAPI(stability="alpha")
@dataclass(frozen=True, eq=False, repr=False)
|
Expr
|
python
|
pytest-dev__pytest
|
src/_pytest/config/__init__.py
|
{
"start": 7103,
"end": 11924
}
|
class ____: # compatibility namespace
main = staticmethod(main)
def filename_arg(path: str, optname: str) -> str:
"""Argparse type validator for filename arguments.
:path: Path of filename.
:optname: Name of the option.
"""
if os.path.isdir(path):
raise UsageError(f"{optname} must be a filename, given: {path}")
return path
def directory_arg(path: str, optname: str) -> str:
"""Argparse type validator for directory arguments.
:path: Path of directory.
:optname: Name of the option.
"""
if not os.path.isdir(path):
raise UsageError(f"{optname} must be a directory, given: {path}")
return path
# Plugins that cannot be disabled via "-p no:X" currently.
essential_plugins = (
"mark",
"main",
"runner",
"fixtures",
"helpconfig", # Provides -p.
)
default_plugins = (
*essential_plugins,
"python",
"terminal",
"debugging",
"unittest",
"capture",
"skipping",
"legacypath",
"tmpdir",
"monkeypatch",
"recwarn",
"pastebin",
"assertion",
"junitxml",
"doctest",
"cacheprovider",
"setuponly",
"setupplan",
"stepwise",
"unraisableexception",
"threadexception",
"warnings",
"logging",
"reports",
"faulthandler",
"subtests",
)
builtin_plugins = {
*default_plugins,
"pytester",
"pytester_assertions",
"terminalprogress",
}
def get_config(
args: Iterable[str] | None = None,
plugins: Sequence[str | _PluggyPlugin] | None = None,
) -> Config:
# Subsequent calls to main will create a fresh instance.
pluginmanager = PytestPluginManager()
invocation_params = Config.InvocationParams(
args=args or (),
plugins=plugins,
dir=pathlib.Path.cwd(),
)
config = Config(pluginmanager, invocation_params=invocation_params)
if invocation_params.args:
# Handle any "-p no:plugin" args.
pluginmanager.consider_preparse(invocation_params.args, exclude_only=True)
for spec in default_plugins:
pluginmanager.import_plugin(spec)
return config
def get_plugin_manager() -> PytestPluginManager:
"""Obtain a new instance of the
:py:class:`pytest.PytestPluginManager`, with default plugins
already loaded.
This function can be used by integration with other tools, like hooking
into pytest to run tests into an IDE.
"""
return get_config().pluginmanager
def _prepareconfig(
args: list[str] | os.PathLike[str],
plugins: Sequence[str | _PluggyPlugin] | None = None,
) -> Config:
if isinstance(args, os.PathLike):
args = [os.fspath(args)]
elif not isinstance(args, list):
msg = ( # type:ignore[unreachable]
"`args` parameter expected to be a list of strings, got: {!r} (type: {})"
)
raise TypeError(msg.format(args, type(args)))
initial_config = get_config(args, plugins)
pluginmanager = initial_config.pluginmanager
try:
if plugins:
for plugin in plugins:
if isinstance(plugin, str):
pluginmanager.consider_pluginarg(plugin)
else:
pluginmanager.register(plugin)
config: Config = pluginmanager.hook.pytest_cmdline_parse(
pluginmanager=pluginmanager, args=args
)
return config
except BaseException:
initial_config._ensure_unconfigure()
raise
def _get_directory(path: pathlib.Path) -> pathlib.Path:
"""Get the directory of a path - itself if already a directory."""
if path.is_file():
return path.parent
else:
return path
def _get_legacy_hook_marks(
method: Any,
hook_type: str,
opt_names: tuple[str, ...],
) -> dict[str, bool]:
if TYPE_CHECKING:
# abuse typeguard from importlib to avoid massive method type union that's lacking an alias
assert inspect.isroutine(method)
known_marks: set[str] = {m.name for m in getattr(method, "pytestmark", [])}
must_warn: list[str] = []
opts: dict[str, bool] = {}
for opt_name in opt_names:
opt_attr = getattr(method, opt_name, AttributeError)
if opt_attr is not AttributeError:
must_warn.append(f"{opt_name}={opt_attr}")
opts[opt_name] = True
elif opt_name in known_marks:
must_warn.append(f"{opt_name}=True")
opts[opt_name] = True
else:
opts[opt_name] = False
if must_warn:
hook_opts = ", ".join(must_warn)
message = _pytest.deprecated.HOOK_LEGACY_MARKING.format(
type=hook_type,
fullname=method.__qualname__,
hook_opts=hook_opts,
)
warn_explicit_for(cast(FunctionType, method), message)
return opts
@final
|
cmdline
|
python
|
huggingface__transformers
|
src/transformers/models/sam3_tracker/modular_sam3_tracker.py
|
{
"start": 4681,
"end": 4763
}
|
class ____(Sam2ImageSegmentationOutput):
pass
|
Sam3TrackerImageSegmentationOutput
|
python
|
scipy__scipy
|
scipy/constants/tests/test_constants.py
|
{
"start": 3563,
"end": 3923
}
|
class ____:
def test_lambda_to_nu(self, xp):
xp_assert_equal(sc.lambda2nu(xp.asarray([sc.speed_of_light, 1])),
xp.asarray([1, sc.speed_of_light]))
def test_lambda_to_nu_array_like(self):
xp_assert_close(sc.lambda2nu([sc.speed_of_light, 1]), [1, sc.speed_of_light])
@make_xp_test_case(sc.nu2lambda)
|
TestLambdaToNu
|
python
|
facebook__pyre-check
|
source/interprocedural_analyses/taint/test/integration/model_query_cache.py
|
{
"start": 1122,
"end": 1283
}
|
class ____(Table):
def attribute_x(self):
return 0
def attribute_z(self):
return 0
def non_attribute_t(self):
return 0
|
BarTable
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/tslibs/timestamp.py
|
{
"start": 3312,
"end": 3602
}
|
class ____:
def setup(self):
dt = datetime(2016, 3, 27, 1, fold=0)
self.tzinfo = dt.astimezone(zoneinfo.ZoneInfo("Europe/Berlin")).tzinfo
self.ts2 = Timestamp(dt)
def time_replace_across_dst(self):
self.ts2.replace(tzinfo=self.tzinfo)
|
TimestampAcrossDst
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/linalg/linalg_grad_test.py
|
{
"start": 2039,
"end": 3520
}
|
class ____(test_lib.TestCase):
pass # Filled in below
# TODO(b/417809163): re-enable this test when upstream issues are resolved
# see commit msg for details
# def _GetMatrixUnaryFunctorGradientTest(functor_, dtype_, shape_, **kwargs_):
#
# @test_util.enable_control_flow_v2
# @test_util.run_in_graph_and_eager_modes(use_gpu=True)
# @test_util.run_without_tensor_float_32(
# 'Tests `tf.linalg.expm`, which call matmul. Additionally, calls ops '
# 'which do matmul in their gradient, such as MatrixSolve.')
# def Test(self):
# def RandomInput():
# np.random.seed(1)
# return np.random.uniform(
# low=-1.0, high=1.0,
# size=np.prod(shape_)).reshape(shape_).astype(dtype_)
# if functor_.__name__ == 'matrix_square_root':
# # Square the input matrix to ensure that its matrix square root exists
# f = lambda x: functor_(math_ops.matmul(x, x), **kwargs_)
# else:
# f = functor_
# # Optimal stepsize for central difference is O(epsilon^{1/3}).
# epsilon = np.finfo(dtype_).eps
# delta = epsilon**(1.0 / 3.0)
# tolerance obtained by looking at actual differences using
# np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
# tol = 1e-6 if dtype_ == np.float64 else 0.05
# theoretical, numerical = gradient_checker_v2.compute_gradient(
# f, [RandomInput()], delta=delta)
# self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
# return Test
|
MatrixUnaryFunctorGradientTest
|
python
|
RaRe-Technologies__gensim
|
gensim/topic_coherence/text_analysis.py
|
{
"start": 8211,
"end": 8869
}
|
class ____(InvertedIndexBased):
"""Gather word occurrence stats from a corpus by iterating over its BoW representation."""
def analyze_text(self, text, doc_num=None):
"""Build an inverted index from a sequence of corpus texts."""
doc_words = frozenset(x[0] for x in text)
top_ids_in_doc = self.relevant_ids.intersection(doc_words)
for word_id in top_ids_in_doc:
self._inverted_index[self.id2contiguous[word_id]].add(self._num_docs)
def accumulate(self, corpus):
for document in corpus:
self.analyze_text(document)
self.num_docs += 1
return self
|
CorpusAccumulator
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/io_ops/parsing_ops_test.py
|
{
"start": 88087,
"end": 91107
}
|
class ____(test.TestCase):
def _testRoundTrip(self, examples):
examples = np.array(examples, dtype=np.object_)
json_tensor = constant_op.constant(
[json_format.MessageToJson(m) for m in examples.flatten()],
shape=examples.shape,
dtype=dtypes.string)
binary_tensor = parsing_ops.decode_json_example(json_tensor)
binary_val = self.evaluate(binary_tensor)
if examples.shape:
self.assertShapeEqual(binary_val, json_tensor)
for input_example, output_binary in zip(
np.array(examples).flatten(), binary_val.flatten()):
output_example = example_pb2.Example()
output_example.ParseFromString(output_binary)
self.assertProtoEquals(input_example, output_example)
else:
output_example = example_pb2.Example()
output_example.ParseFromString(binary_val)
self.assertProtoEquals(examples.item(), output_example)
def testEmptyTensor(self):
self._testRoundTrip([])
self._testRoundTrip([[], [], []])
def testEmptyExamples(self):
self._testRoundTrip([example(), example(), example()])
def testDenseFeaturesScalar(self):
self._testRoundTrip(
example(features=features({"a": float_feature([1, 1, 3])})))
def testDenseFeaturesVector(self):
self._testRoundTrip([
example(features=features({"a": float_feature([1, 1, 3])})),
example(features=features({"a": float_feature([-1, -1, 2])})),
])
def testDenseFeaturesMatrix(self):
self._testRoundTrip([
[example(features=features({"a": float_feature([1, 1, 3])}))],
[example(features=features({"a": float_feature([-1, -1, 2])}))],
])
def testSparseFeatures(self):
self._testRoundTrip([
example(features=features({"st_c": float_feature([3, 4])})),
example(features=features({"st_c": float_feature([])})),
example(features=features({"st_d": feature()})),
example(
features=features({
"st_c": float_feature([1, 2, -1]),
"st_d": bytes_feature([b"hi"])
})),
])
def testSerializedContainingBytes(self):
aname = "a"
bname = "b*has+a:tricky_name"
self._testRoundTrip([
example(
features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str"])
})),
example(
features=features({
aname: float_feature([-1, -1]),
bname: bytes_feature([b"b1"])
})),
])
def testInvalidSyntax(self):
json_tensor = constant_op.constant(["{]"])
if context.executing_eagerly():
with self.assertRaisesRegex(errors.InvalidArgumentError,
"Error while parsing JSON"):
parsing_ops.decode_json_example(json_tensor)
else:
binary_tensor = parsing_ops.decode_json_example(json_tensor)
with self.assertRaisesOpError("Error while parsing JSON"):
self.evaluate(binary_tensor)
|
DecodeJSONExampleTest
|
python
|
joke2k__faker
|
tests/providers/test_currency.py
|
{
"start": 16788,
"end": 17213
}
|
class ____:
"""Test tr_TR currency provider"""
num_samples = 100
@classmethod
def setup_class(cls):
from faker.providers.currency.tr_TR import Provider as TrTrCurrencyProvider
cls.provider = TrTrCurrencyProvider
def test_pricetag(self, faker, num_samples):
for _ in range(num_samples):
pricetag = faker.pricetag()
assert isinstance(pricetag, str)
|
TestTrTr
|
python
|
doocs__leetcode
|
solution/0700-0799/0795.Number of Subarrays with Bounded Maximum/Solution2.py
|
{
"start": 0,
"end": 684
}
|
class ____:
def numSubarrayBoundedMax(self, nums: List[int], left: int, right: int) -> int:
n = len(nums)
l, r = [-1] * n, [n] * n
stk = []
for i, v in enumerate(nums):
while stk and nums[stk[-1]] <= v:
stk.pop()
if stk:
l[i] = stk[-1]
stk.append(i)
stk = []
for i in range(n - 1, -1, -1):
while stk and nums[stk[-1]] < nums[i]:
stk.pop()
if stk:
r[i] = stk[-1]
stk.append(i)
return sum(
(i - l[i]) * (r[i] - i) for i, v in enumerate(nums) if left <= v <= right
)
|
Solution
|
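A small worked check of the monotonic-stack counting above, assuming `from typing import List` is in scope (the input is the standard bounded-maximum example, not part of the row):
# nums = [2, 1, 4, 3], left = 2, right = 3
# previous strictly-greater indices: l = [-1, 0, -1, 2]; next greater-or-equal indices: r = [2, 2, 4, 4]
# only nums[0] = 2 and nums[3] = 3 lie in [left, right], contributing (0 - (-1)) * (2 - 0) + (3 - 2) * (4 - 3) = 3
print(Solution().numSubarrayBoundedMax([2, 1, 4, 3], 2, 3))  # -> 3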
python
|
pytorch__pytorch
|
test/dynamo/test_higher_order_ops.py
|
{
"start": 161254,
"end": 162922
}
|
class ____(torch.nn.Module):
def forward(self, L_inputs_: "f32[64, 3]", L_targets_: "f32[64, 3]"):
l_inputs_ = L_inputs_
l_targets_ = L_targets_
prediction: "f32[64, 3]" = self.model(l_inputs_); l_inputs_ = None
mse_loss: "f32[]" = torch.nn.functional.mse_loss(prediction, l_targets_); prediction = l_targets_ = None
return (mse_loss,)
""",
)
@config.patch(inline_inbuilt_nn_modules=True)
def test_functional_call_sequential_params_and_buffers(self):
# copied from test/test_stateless.py
class MockModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.l1 = torch.nn.Linear(1, 1)
self.register_buffer("buffer", torch.ones(1))
self.foo = 0.0
def forward(self, x):
return self.l1(x) + self.buffer
def wrapper_fn(model, params, buffers, inputs):
# two separate dictionaries
return torch.func.functional_call(model, (params, buffers), inputs)
model = MockModule()
params = dict(model.named_parameters())
buffers = dict(model.named_buffers())
inputs = torch.tensor([[1.5]])
wrapped_gm = self._compile_check(
wrapper_fn, (model, params, buffers, inputs), fullgraph=False
)
# Dynamic shapes produce a slightly different graph.
if check_dynamic_shape_capture():
return
actual = normalize_gm(wrapped_gm.print_readable(print_output=False))
if torch._dynamo.config.inline_inbuilt_nn_modules:
expected = """\
|
GraphModule
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-intercom/components.py
|
{
"start": 6150,
"end": 7045
}
|
class ____(DefaultErrorHandler):
"""
The difference between the built-in `DefaultErrorHandler` and this one is the custom decorator,
applied on top of `interpret_response` to preserve the api calls for a defined amount of time,
calculated using the rate limit headers and not use the custom backoff strategy,
since we deal with Response.status_code == 200,
the default requester's logic doesn't allow to handle the status of 200 with `should_retry()`.
"""
# The RateLimiter is applied to balance the api requests.
@IntercomRateLimiter.balance_rate_limit()
def interpret_response(self, response_or_exception: Optional[Union[requests.Response, Exception]]) -> ErrorResolution:
# Check for response.headers to define the backoff time before the next api call
return super().interpret_response(response_or_exception)
|
ErrorHandlerWithRateLimiter
|
python
|
pyqtgraph__pyqtgraph
|
pyqtgraph/parametertree/parameterTypes/text.py
|
{
"start": 530,
"end": 664
}
|
class ____(Parameter):
"""Editable string, displayed as large text box in the tree."""
itemClass = TextParameterItem
|
TextParameter
|
python
|
great-expectations__great_expectations
|
great_expectations/compatibility/not_imported.py
|
{
"start": 665,
"end": 2442
}
|
class ____:
def __init__(self, message: str) -> None:
self.__dict__["gx_error_message"] = message
def __getattr__(self, attr: str) -> NoReturn:
raise ModuleNotFoundError(self.__dict__["gx_error_message"])
@override
def __setattr__(self, key: str, value: Any) -> NoReturn:
raise ModuleNotFoundError(self.__dict__["gx_error_message"])
def __call__(self, *args, **kwargs) -> NoReturn:
raise ModuleNotFoundError(self.__dict__["gx_error_message"])
@override
def __str__(self) -> str:
return self.__dict__["gx_error_message"]
def __bool__(self) -> Literal[False]:
return False
def is_version_greater_or_equal(version: str | Version, compare_version: str | Version) -> bool:
"""Check if the version is greater or equal to the compare_version.
Args:
version: Current version.
compare_version: Version to compare to.
Returns:
Boolean indicating if the version is greater or equal to the compare version.
"""
if isinstance(version, str):
version = Version(version)
if isinstance(compare_version, str):
compare_version = Version(compare_version)
return version >= compare_version
def is_version_less_than(version: str | Version, compare_version: str | Version) -> bool:
"""Check if the version is less than the compare_version.
Args:
version: Current version.
compare_version: Version to compare to.
Returns:
Boolean indicating if the version is less than the compare version.
"""
if isinstance(version, str):
version = Version(version)
if isinstance(compare_version, str):
compare_version = Version(compare_version)
return version < compare_version
|
NotImported
|
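A minimal sketch of how a sentinel like `NotImported` above is typically wired up; the `sqlalchemy` dependency name is a placeholder chosen for illustration, not taken from the row:
try:
    import sqlalchemy  # placeholder optional dependency for this sketch
except ImportError:
    sqlalchemy = NotImported("sqlalchemy is required for this feature; please install it")
if not sqlalchemy:  # the sentinel is falsy, so capability checks stay cheap
    print("SQL features disabled")
# sqlalchemy.create_engine(...)  # any real use would raise ModuleNotFoundError with the stored message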
python
|
openai__openai-python
|
src/openai/types/beta/chatkit/chat_session_workflow_param.py
|
{
"start": 257,
"end": 391
}
|
class ____(TypedDict, total=False):
enabled: bool
"""Whether tracing is enabled during the session. Defaults to true."""
|
Tracing
|
python
|
ray-project__ray
|
python/ray/data/_internal/execution/operators/task_pool_map_operator.py
|
{
"start": 503,
"end": 7428
}
|
class ____(MapOperator):
"""A MapOperator implementation that executes tasks on a task pool."""
def __init__(
self,
map_transformer: MapTransformer,
input_op: PhysicalOperator,
data_context: DataContext,
name: str = "TaskPoolMap",
target_max_block_size_override: Optional[int] = None,
min_rows_per_bundle: Optional[int] = None,
ref_bundler: Optional[BaseRefBundler] = None,
max_concurrency: Optional[int] = None,
supports_fusion: bool = True,
map_task_kwargs: Optional[Dict[str, Any]] = None,
ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]] = None,
ray_remote_args: Optional[Dict[str, Any]] = None,
):
"""Create an TaskPoolMapOperator instance.
Args:
transform_fn: The function to apply to each ref bundle input.
input_op: Operator generating input data for this op.
name: The name of this operator.
target_max_block_size_override: Override for target max-block-size.
min_rows_per_bundle: The number of rows to gather per batch passed to the
transform_fn, or None to use the block size. Setting the batch size is
important for the performance of GPU-accelerated transform functions.
The actual rows passed may be less if the dataset is small.
ref_bundler: The ref bundler to use for this operator.
max_concurrency: The maximum number of Ray tasks to use concurrently,
or None to use as many tasks as possible.
supports_fusion: Whether this operator supports fusion with other operators.
map_task_kwargs: A dictionary of kwargs to pass to the map task. You can
access these kwargs through the `TaskContext.kwargs` dictionary.
ray_remote_args_fn: A function that returns a dictionary of remote args
passed to each map worker. The purpose of this argument is to generate
dynamic arguments for each actor/task, and will be called each time
prior to initializing the worker. Args returned from this dict will
always override the args in ``ray_remote_args``. Note: this is an
advanced, experimental feature.
ray_remote_args: Customize the :func:`ray.remote` args for this op's tasks.
"""
super().__init__(
map_transformer,
input_op,
data_context,
name,
target_max_block_size_override,
min_rows_per_bundle,
ref_bundler,
supports_fusion,
map_task_kwargs,
ray_remote_args_fn,
ray_remote_args,
)
if max_concurrency is not None and max_concurrency <= 0:
raise ValueError(f"max_concurrency have to be > 0 (got {max_concurrency})")
self._max_concurrency = max_concurrency
# NOTE: Unlike static Ray remote args, dynamic arguments extracted from the
# blocks themselves are going to be passed inside `fn.options(...)`
# invocation
ray_remote_static_args = {
**(self._ray_remote_args or {}),
"num_returns": "streaming",
"_labels": {self._OPERATOR_ID_LABEL_KEY: self.id},
}
self._map_task = cached_remote_fn(_map_task, **ray_remote_static_args)
def _add_bundled_input(self, bundle: RefBundle):
# Submit the task as a normal Ray task.
ctx = TaskContext(
task_idx=self._next_data_task_idx,
op_name=self.name,
target_max_block_size_override=self.target_max_block_size_override,
)
dynamic_ray_remote_args = self._get_dynamic_ray_remote_args(input_bundle=bundle)
dynamic_ray_remote_args["name"] = self.name
if (
"_generator_backpressure_num_objects" not in dynamic_ray_remote_args
and self.data_context._max_num_blocks_in_streaming_gen_buffer is not None
):
# The `_generator_backpressure_num_objects` parameter should be
# `2 * _max_num_blocks_in_streaming_gen_buffer` because we yield
# 2 objects for each block: the block and the block metadata.
dynamic_ray_remote_args["_generator_backpressure_num_objects"] = (
2 * self.data_context._max_num_blocks_in_streaming_gen_buffer
)
data_context = self.data_context
gen = self._map_task.options(**dynamic_ray_remote_args).remote(
self._map_transformer_ref,
data_context,
ctx,
*bundle.block_refs,
slices=bundle.slices,
**self.get_map_task_kwargs(),
)
self._submit_data_task(gen, bundle)
def progress_str(self) -> str:
return ""
def current_processor_usage(self) -> ExecutionResources:
num_active_workers = self.num_active_tasks()
return ExecutionResources(
cpu=self._ray_remote_args.get("num_cpus", 0) * num_active_workers,
gpu=self._ray_remote_args.get("num_gpus", 0) * num_active_workers,
)
def pending_processor_usage(self) -> ExecutionResources:
return ExecutionResources()
def incremental_resource_usage(self) -> ExecutionResources:
return self.per_task_resource_allocation().copy(
object_store_memory=(
self._metrics.obj_store_mem_max_pending_output_per_task or 0
),
)
def per_task_resource_allocation(self) -> ExecutionResources:
return ExecutionResources(
cpu=self._ray_remote_args.get("num_cpus", 0),
gpu=self._ray_remote_args.get("num_gpus", 0),
memory=self._ray_remote_args.get("memory", 0),
)
def min_scheduling_resources(
self: "PhysicalOperator",
) -> ExecutionResources:
return self.incremental_resource_usage()
def get_max_concurrency_limit(self) -> Optional[int]:
return self._max_concurrency
def all_inputs_done(self):
super().all_inputs_done()
if (
self._max_concurrency is not None
and self._metrics.num_inputs_received < self._max_concurrency
):
warnings.warn(
f"The maximum number of concurrent tasks for '{self.name}' is set to "
f"{self._max_concurrency}, but the operator only received "
f"{self._metrics.num_inputs_received} input(s). This means that the "
f"operator can launch at most {self._metrics.num_inputs_received} "
"task(s), which is less than the concurrency limit. You might be able "
"to increase the number of concurrent tasks by configuring "
"`override_num_blocks` earlier in the pipeline."
)
|
TaskPoolMapOperator
|
python
|
cython__cython
|
tests/run/test_templatelib.py
|
{
"start": 3364,
"end": 8641
}
|
class ____(unittest.TestCase, TStringBaseCase):
def test_common(self):
self.assertEqual(type(t'').__name__, 'Template')
self.assertEqual(type(t'').__qualname__, 'Template')
self.assertEqual(type(t'').__module__, 'string.templatelib')
a = 'a'
i = t'{a}'.interpolations[0]
self.assertEqual(type(i).__name__, 'Interpolation')
self.assertEqual(type(i).__qualname__, 'Interpolation')
self.assertEqual(type(i).__module__, 'string.templatelib')
def test_final_types(self):
with self.assertRaisesRegex(TypeError, 'is not an acceptable base type'):
class Sub(Template): ...
with self.assertRaisesRegex(TypeError, 'is not an acceptable base type'):
class Sub(Interpolation): ...
def test_basic_creation(self):
# Simple t-string creation
t = t'Hello, world'
self.assertIsInstance(t, Template)
self.assertTStringEqual(t, ('Hello, world',), ())
self.assertEqual(fstring(t), 'Hello, world')
# Empty t-string
t = t''
self.assertTStringEqual(t, ('',), ())
self.assertEqual(fstring(t), '')
# Multi-line t-string
t = t"""Hello,
world"""
self.assertEqual(t.strings, ('Hello,\nworld',))
self.assertEqual(len(t.interpolations), 0)
self.assertEqual(fstring(t), 'Hello,\nworld')
def test_interpolation_creation(self):
i = Interpolation('Maria', 'name', 'a', 'fmt')
self.assertInterpolationEqual(i, ('Maria', 'name', 'a', 'fmt'))
i = Interpolation('Maria', 'name', 'a')
self.assertInterpolationEqual(i, ('Maria', 'name', 'a'))
i = Interpolation('Maria', 'name')
self.assertInterpolationEqual(i, ('Maria', 'name'))
i = Interpolation('Maria')
self.assertInterpolationEqual(i, ('Maria',))
def test_creation_interleaving(self):
# Should add strings on either side
t = Template(Interpolation('Maria', 'name', None, ''))
self.assertTStringEqual(t, ('', ''), [('Maria', 'name')])
self.assertEqual(fstring(t), 'Maria')
# Should prepend empty string
t = Template(Interpolation('Maria', 'name', None, ''), ' is my name')
self.assertTStringEqual(t, ('', ' is my name'), [('Maria', 'name')])
self.assertEqual(fstring(t), 'Maria is my name')
# Should append empty string
t = Template('Hello, ', Interpolation('Maria', 'name', None, ''))
self.assertTStringEqual(t, ('Hello, ', ''), [('Maria', 'name')])
self.assertEqual(fstring(t), 'Hello, Maria')
# Should concatenate strings
t = Template('Hello', ', ', Interpolation('Maria', 'name', None, ''),
'!')
self.assertTStringEqual(t, ('Hello, ', '!'), [('Maria', 'name')])
self.assertEqual(fstring(t), 'Hello, Maria!')
# Should add strings on either side and in between
t = Template(Interpolation('Maria', 'name', None, ''),
Interpolation('Python', 'language', None, ''))
self.assertTStringEqual(
t, ('', '', ''), [('Maria', 'name'), ('Python', 'language')]
)
self.assertEqual(fstring(t), 'MariaPython')
def test_template_values(self):
t = t'Hello, world'
self.assertEqual(t.values, ())
name = "Lys"
t = t'Hello, {name}'
self.assertEqual(t.values, ("Lys",))
country = "GR"
age = 0
t = t'Hello, {name}, {age} from {country}'
self.assertEqual(t.values, ("Lys", 0, "GR"))
def test_pickle_template(self):
user = 'test'
for template in (
t'',
t"No values",
t'With inter {user}',
t'With ! {user!r}',
t'With format {1 / 0.3:.2f}',
Template(),
Template('a'),
Template(Interpolation('Nikita', 'name', None, '')),
Template('a', Interpolation('Nikita', 'name', 'r', '')),
):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto, template=template):
pickled = pickle.dumps(template, protocol=proto)
unpickled = pickle.loads(pickled)
self.assertEqual(unpickled.values, template.values)
self.assertEqual(fstring(unpickled), fstring(template))
def test_pickle_interpolation(self):
for interpolation in (
Interpolation('Nikita', 'name', None, ''),
Interpolation('Nikita', 'name', 'r', ''),
Interpolation(1/3, 'x', None, '.2f'),
):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto, interpolation=interpolation):
pickled = pickle.dumps(interpolation, protocol=proto)
unpickled = pickle.loads(pickled)
self.assertEqual(unpickled.value, interpolation.value)
self.assertEqual(unpickled.expression, interpolation.expression)
self.assertEqual(unpickled.conversion, interpolation.conversion)
self.assertEqual(unpickled.format_spec, interpolation.format_spec)
|
TestTemplate
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/asset_graph_view/serializable_entity_subset.py
|
{
"start": 1071,
"end": 1772
}
|
class ____(DataclassSerializer):
"""Ensures that the inner PartitionsSubset is converted to a serializable form if necessary."""
def get_storage_name(self) -> str:
# backcompat
return "AssetSubset"
def before_pack(self, value: "SerializableEntitySubset") -> "SerializableEntitySubset": # pyright: ignore[reportIncompatibleMethodOverride]
if value.is_partitioned:
return replace(value, value=value.subset_value.to_serializable_subset())
return value
@whitelist_for_serdes(
serializer=EntitySubsetSerializer,
storage_field_names={"key": "asset_key"},
old_storage_names={"AssetSubset"},
)
@dataclass(frozen=True)
|
EntitySubsetSerializer
|
python
|
cython__cython
|
tests/run/pure_cdef_class_dataclass.py
|
{
"start": 928,
"end": 1043
}
|
class ____:
def __repr__(self):
return "DummyObj()"
@cython.dataclasses.dataclass
@cython.cclass
|
DummyObj
|
python
|
django__django
|
tests/middleware_exceptions/middleware.py
|
{
"start": 1469,
"end": 1669
}
|
class ____(BaseMiddleware):
async def process_view(self, request, view_func, view_args, view_kwargs):
return HttpResponse("Processed view %s" % view_func.__name__)
|
AsyncProcessViewMiddleware
|
python
|
kubernetes-client__python
|
kubernetes/client/models/rbac_v1_subject.py
|
{
"start": 383,
"end": 6882
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_group': 'str',
'kind': 'str',
'name': 'str',
'namespace': 'str'
}
attribute_map = {
'api_group': 'apiGroup',
'kind': 'kind',
'name': 'name',
'namespace': 'namespace'
}
def __init__(self, api_group=None, kind=None, name=None, namespace=None, local_vars_configuration=None): # noqa: E501
"""RbacV1Subject - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_group = None
self._kind = None
self._name = None
self._namespace = None
self.discriminator = None
if api_group is not None:
self.api_group = api_group
self.kind = kind
self.name = name
if namespace is not None:
self.namespace = namespace
@property
def api_group(self):
"""Gets the api_group of this RbacV1Subject. # noqa: E501
APIGroup holds the API group of the referenced subject. Defaults to \"\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io\" for User and Group subjects. # noqa: E501
:return: The api_group of this RbacV1Subject. # noqa: E501
:rtype: str
"""
return self._api_group
@api_group.setter
def api_group(self, api_group):
"""Sets the api_group of this RbacV1Subject.
APIGroup holds the API group of the referenced subject. Defaults to \"\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io\" for User and Group subjects. # noqa: E501
:param api_group: The api_group of this RbacV1Subject. # noqa: E501
:type: str
"""
self._api_group = api_group
@property
def kind(self):
"""Gets the kind of this RbacV1Subject. # noqa: E501
Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error. # noqa: E501
:return: The kind of this RbacV1Subject. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this RbacV1Subject.
Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error. # noqa: E501
:param kind: The kind of this RbacV1Subject. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and kind is None: # noqa: E501
raise ValueError("Invalid value for `kind`, must not be `None`") # noqa: E501
self._kind = kind
@property
def name(self):
"""Gets the name of this RbacV1Subject. # noqa: E501
Name of the object being referenced. # noqa: E501
:return: The name of this RbacV1Subject. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this RbacV1Subject.
Name of the object being referenced. # noqa: E501
:param name: The name of this RbacV1Subject. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def namespace(self):
"""Gets the namespace of this RbacV1Subject. # noqa: E501
Namespace of the referenced object. If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error. # noqa: E501
:return: The namespace of this RbacV1Subject. # noqa: E501
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""Sets the namespace of this RbacV1Subject.
Namespace of the referenced object. If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error. # noqa: E501
:param namespace: The namespace of this RbacV1Subject. # noqa: E501
:type: str
"""
self._namespace = namespace
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RbacV1Subject):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, RbacV1Subject):
return True
return self.to_dict() != other.to_dict()
|
RbacV1Subject
|
python
|
allegroai__clearml
|
clearml/automation/optimization.py
|
{
"start": 1686,
"end": 9580
}
|
class ____(_ObjectiveInterface):
"""
Optimization ``Objective`` class to maximize / minimize over all experiments. This class will sample a specific
scalar from all experiments, and maximize / minimize over single scalar (i.e., title and series combination).
``SearchStrategy`` and ``HyperParameterOptimizer`` use ``Objective`` in the strategy search algorithm.
"""
def __init__(
self,
title: str,
series: str,
order: str = "max",
extremum: bool = False,
) -> ():
"""
Construct ``Objective`` object that will return the scalar value for a specific task ID.
:param str title: The scalar graph title to sample from.
:param str series: The scalar series title to sample from.
:param str order: The setting for maximizing or minimizing the objective scalar value.
The values are:
- ``max``
- ``min``
:param bool extremum: Return the global minimum / maximum reported metric value
The values are:
- ``True`` - Return the global minimum / maximum reported metric value.
- ``False`` - Return the last value reported for a specific Task. (Default)
"""
self.title = title
self.series = series
assert order in (
"min",
"max",
)
# normalize value so we always look for the highest objective value
self.sign = -1 if (isinstance(order, str) and order.lower().strip() == "min") else 1
self._metric = None
self.extremum = extremum
def get_objective(self, task_id: Union[str, Task, ClearmlJob]) -> Optional[float]:
"""
Return a specific task scalar value based on the objective settings (title/series).
:param str task_id: The Task ID to retrieve scalar from (or ``ClearMLJob`` object).
:return: The scalar value.
"""
# create self._metric
self._get_last_metrics_encode_field()
if isinstance(task_id, Task):
task_id = task_id.id
elif isinstance(task_id, ClearmlJob):
task_id = task_id.task_id()
# noinspection PyBroadException, Py
try:
# noinspection PyProtectedMember
task = Task._query_tasks(
task_ids=[task_id],
only_fields=["last_metrics.{}.{}".format(self._metric[0], self._metric[1])],
)[0]
except Exception:
return None
metrics = task.last_metrics
if not metrics:
return None
# noinspection PyBroadException
try:
values = metrics[self._metric[0]][self._metric[1]]
if not self.extremum:
return values["value"]
return values["min_value"] if self.sign < 0 else values["max_value"]
except Exception:
return None
def get_current_raw_objective(self, task: Union[ClearmlJob, Task]) -> (int, float):
"""
Return the current raw value (without sign normalization) of the objective.
:param str task: The Task or Job to retrieve scalar from (or ``ClearmlJob`` object).
:return: Tuple(iteration, value) if, and only if, the metric exists. None if the metric does not exist.
"""
if isinstance(task, Task):
task_id = task.id
elif isinstance(task, ClearmlJob):
task_id = task.task_id()
else:
task_id = task
if not task_id:
raise ValueError("Task ID not provided")
# send request
# noinspection PyBroadException
try:
# noinspection PyProtectedMember
res = Task._get_default_session().send(
events_service.ScalarMetricsIterHistogramRequest(task=task_id, key="iter", samples=None),
)
except Exception:
res = None
if not res:
return None
response = res.wait()
if not response.ok() or not response.response_data:
return None
scalars = response.response_data
# noinspection PyBroadException
try:
return (
scalars[self.title][self.series]["x"][-1],
scalars[self.title][self.series]["y"][-1],
)
except Exception:
return None
def get_objective_sign(self) -> float:
"""
Return the sign of the objective.
- ``+1`` - If maximizing
- ``-1`` - If minimizing
:return: Objective function sign.
"""
return self.sign
def get_objective_metric(self) -> (str, str):
"""
Return the metric title, series pair of the objective.
:return: (title, series)
"""
return self.title, self.series
def get_normalized_objective(self, task_id: Union[str, Task, ClearmlJob]) -> Optional[float]:
"""
Return a normalized task scalar value based on the objective settings (title/series).
I.e. objective is always to maximize the returned value
:param str task_id: The Task ID to retrieve scalar from.
:return: Normalized scalar value.
"""
objective = self.get_objective(task_id=task_id)
if objective is None:
return None
# normalize value so we always look for the highest objective value
return self.sign * objective
def get_top_tasks(
self,
top_k: int,
optimizer_task_id: Optional[str] = None,
task_filter: Optional[dict] = None,
) -> Sequence[Task]:
"""
Return a list of Tasks of the top performing experiments, based on the title/series objective.
:param int top_k: The number of Tasks (experiments) to return.
:param str optimizer_task_id: Parent optimizer Task ID
:param dict task_filter: Optional task_filtering for the query
:return: A list of Task objects, ordered by performance, where index 0 is the best performing Task.
"""
task_filter = deepcopy(task_filter) if task_filter else {}
task_filter.update({"page_size": int(top_k), "page": 0})
if optimizer_task_id:
task_filter["parent"] = optimizer_task_id
order_by = self._get_last_metrics_encode_field()
if order_by and (order_by.startswith("last_metrics") or order_by.startswith("-last_metrics")):
parts = order_by.split(".")
if parts[-1] in ("min", "max", "last"):
title = hashlib.md5(str(parts[1]).encode("utf-8")).hexdigest()
series = hashlib.md5(str(parts[2]).encode("utf-8")).hexdigest()
minmax = "min_value" if "min" in parts[3] else ("max_value" if "max" in parts[3] else "value")
order_by = "{}last_metrics.".join(
(
"-" if order_by and order_by[0] == "-" else "",
title,
series,
minmax,
)
)
if order_by:
task_filter["order_by"] = [order_by]
return Task.get_tasks(task_filter=task_filter)
def _get_last_metrics_encode_field(self) -> str:
"""
Return encoded representation of the title/series metric.
:return: The objective title/series.
"""
if not self._metric:
title = hashlib.md5(str(self.title).encode("utf-8")).hexdigest()
series = hashlib.md5(str(self.series).encode("utf-8")).hexdigest()
self._metric = title, series
return "{}last_metrics.{}.{}.{}".format(
"-" if self.sign > 0 else "",
self._metric[0],
self._metric[1],
("min_value" if self.sign < 0 else "max_value") if self.extremum else "value",
)
|
Objective
|
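Based on the constructor and accessors shown above, a hedged usage sketch (the title/series values are hypothetical):
objective = Objective(title="validation", series="accuracy", order="max", extremum=False)
assert objective.get_objective_sign() == 1  # maximizing
assert objective.get_objective_metric() == ("validation", "accuracy")
# objective.get_objective("<task-id>") would return that scalar for a given Task ID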
python
|
huggingface__transformers
|
src/transformers/models/seed_oss/modular_seed_oss.py
|
{
"start": 5775,
"end": 7148
}
|
class ____(LlamaForCausalLM):
def forward(
self,
**super_kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, SeedOssForCausalLM
>>> model = SeedOssForCausalLM.from_pretrained("ByteDance-Seed/SeedOss-36B")
>>> tokenizer = AutoTokenizer.from_pretrained("ByteDance-Seed/SeedOss-36B")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
return super().forward(**super_kwargs)
|
SeedOssForCausalLM
|
python
|
doocs__leetcode
|
solution/3200-3299/3259.Maximum Energy Boost From Two Drinks/Solution.py
|
{
"start": 0,
"end": 440
}
|
class ____:
def maxEnergyBoost(self, energyDrinkA: List[int], energyDrinkB: List[int]) -> int:
n = len(energyDrinkA)
f = [[0] * 2 for _ in range(n)]
f[0][0] = energyDrinkA[0]
f[0][1] = energyDrinkB[0]
for i in range(1, n):
f[i][0] = max(f[i - 1][0] + energyDrinkA[i], f[i - 1][1])
f[i][1] = max(f[i - 1][1] + energyDrinkB[i], f[i - 1][0])
return max(f[n - 1])
|
Solution
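The table f[i][0]/f[i][1] above stores the best total after hour i when ending on drink A or B; switching drinks costs one skipped hour, which is why the cross term carries f[i-1] of the other drink over without adding the current boost. A rolling two-variable sketch of the same recurrence (illustrative only, with hand-checked values):
def max_energy(a, b):
    fa, fb = a[0], b[0]
    for x, y in zip(a[1:], b[1:]):
        fa, fb = max(fa + x, fb), max(fb + y, fa)
    return max(fa, fb)

assert max_energy([4, 1, 1], [1, 1, 3]) == 7   # drink A, skip an hour, then drink B
assert max_energy([1, 3, 1], [3, 1, 1]) == 5   # staying on a single drink is best here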
|
python
|
eventlet__eventlet
|
eventlet/db_pool.py
|
{
"start": 286,
"end": 380
}
|
class ____(Exception):
pass
def cleanup_rollback(conn):
conn.rollback()
|
ConnectTimeout
|
python
|
celery__celery
|
t/unit/tasks/test_result.py
|
{
"start": 16673,
"end": 21207
}
|
class ____:
def test_resultset_repr(self):
assert repr(self.app.ResultSet(
[self.app.AsyncResult(t) for t in ['1', '2', '3']]))
def test_eq_other(self):
assert self.app.ResultSet([
self.app.AsyncResult(t) for t in [1, 3, 3]]) != 1
rs1 = self.app.ResultSet([self.app.AsyncResult(1)])
rs2 = self.app.ResultSet([self.app.AsyncResult(1)])
assert rs1 == rs2
def test_get(self):
x = self.app.ResultSet([self.app.AsyncResult(t) for t in [1, 2, 3]])
b = x.results[0].backend = Mock()
b.supports_native_join = False
x.join_native = Mock()
x.join = Mock()
x.get()
x.join.assert_called()
b.supports_native_join = True
x.get()
x.join_native.assert_called()
@patch('celery.result.task_join_will_block')
def test_get_sync_subtask_option(self, task_join_will_block):
task_join_will_block.return_value = True
x = self.app.ResultSet([self.app.AsyncResult(str(t)) for t in [1, 2, 3]])
b = x.results[0].backend = Mock()
b.supports_native_join = False
with pytest.raises(RuntimeError):
x.get()
with pytest.raises(TimeoutError):
x.get(disable_sync_subtasks=False, timeout=0.1)
def test_join_native_with_group_chain_group(self):
"""Test group(chain(group)) case, join_native can be run correctly.
In group(chain(group)) case, GroupResult has no _cache property, and
AsyncBackendMixin.iter_native returns a node instead of node._cache,
this test make sure ResultSet.join_native can process correctly both
values of AsyncBackendMixin.iter_native returns.
"""
def _get_meta(tid, result=None, children=None):
return {
'status': states.SUCCESS,
'result': result,
'children': children,
'task_id': tid,
}
results = [self.app.AsyncResult(t) for t in [1, 2, 3]]
values = [(_.id, _get_meta(_.id, _)) for _ in results]
g_res = GroupResult(6, [self.app.AsyncResult(t) for t in [4, 5]])
results += [g_res]
values += [(6, g_res.children)]
x = self.app.ResultSet(results)
x.results[0].backend = Mock()
x.results[0].backend.join = Mock()
x.results[3][0].get = Mock()
x.results[3][0].get.return_value = g_res.results[0]
x.results[3][1].get = Mock()
x.results[3][1].get.return_value = g_res.results[1]
x.iter_native = Mock()
x.iter_native.return_value = values.__iter__()
x.join_native()
x.iter_native.assert_called()
def test_eq_ne(self):
g1 = self.app.ResultSet([
self.app.AsyncResult('id1'),
self.app.AsyncResult('id2'),
])
g2 = self.app.ResultSet([
self.app.AsyncResult('id1'),
self.app.AsyncResult('id2'),
])
g3 = self.app.ResultSet([
self.app.AsyncResult('id3'),
self.app.AsyncResult('id1'),
])
assert g1 == g2
assert g1 != g3
assert g1 != object()
def test_takes_app_from_first_task(self):
x = ResultSet([self.app.AsyncResult('id1')])
assert x.app is x.results[0].app
x.app = self.app
assert x.app is self.app
def test_get_empty(self):
x = self.app.ResultSet([])
assert x.supports_native_join is None
x.join = Mock(name='join')
x.get()
x.join.assert_called()
def test_add(self):
x = self.app.ResultSet([self.app.AsyncResult(1)])
x.add(self.app.AsyncResult(2))
assert len(x) == 2
x.add(self.app.AsyncResult(2))
assert len(x) == 2
@contextmanager
def dummy_copy(self):
with patch('celery.result.copy') as copy:
def pass_value(arg):
return arg
copy.side_effect = pass_value
yield
def test_add_discard(self):
x = self.app.ResultSet([])
x.add(self.app.AsyncResult('1'))
assert self.app.AsyncResult('1') in x.results
x.discard(self.app.AsyncResult('1'))
x.discard(self.app.AsyncResult('1'))
x.discard('1')
assert self.app.AsyncResult('1') not in x.results
x.update([self.app.AsyncResult('2')])
def test_clear(self):
x = self.app.ResultSet([])
r = x.results
x.clear()
assert x.results is r
|
test_ResultSet
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/refurb/FURB189.py
|
{
"start": 80,
"end": 292
}
|
class ____:
__slots__ = ()
def __setitem__(self, key, value):
if key in self:
raise KeyError(str(key) + ' already set')
return super().__setitem__(key, value)
|
SetOnceMappingMixin
|
python
|
sympy__sympy
|
sympy/physics/quantum/tests/test_dagger.py
|
{
"start": 1270,
"end": 2632
}
|
class ____(Expr):
def _eval_adjoint(self):
return I
def test_eval_adjoint():
f = Foo()
d = Dagger(f)
assert d == I
np = import_module('numpy')
def test_numpy_dagger():
if not np:
skip("numpy not installed.")
a = np.array([[1.0, 2.0j], [-1.0j, 2.0]])
adag = a.copy().transpose().conjugate()
assert (Dagger(a) == adag).all()
scipy = import_module('scipy', import_kwargs={'fromlist': ['sparse']})
def test_scipy_sparse_dagger():
if not np:
skip("numpy not installed.")
if not scipy:
skip("scipy not installed.")
else:
sparse = scipy.sparse
a = sparse.csr_matrix([[1.0 + 0.0j, 2.0j], [-1.0j, 2.0 + 0.0j]])
adag = a.copy().transpose().conjugate()
assert np.linalg.norm((Dagger(a) - adag).todense()) == 0.0
def test_unknown():
"""Check treatment of unknown objects.
Objects without adjoint or conjugate/transpose methods
are sympified and wrapped in dagger.
"""
x = symbols("x", commutative=False)
result = Dagger(x)
assert result.args == (x,) and isinstance(result, adjoint)
def test_unevaluated():
"""Check that evaluate=False returns unevaluated Dagger.
"""
x = symbols("x", real=True)
assert Dagger(x) == x
result = Dagger(x, evaluate=False)
assert result.args == (x,) and isinstance(result, adjoint)
|
Foo
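For readers unfamiliar with Dagger, a short usage sketch of the behaviour these tests rely on (assumes SymPy is installed; results shown as assertions):
from sympy import I, Matrix
from sympy.physics.quantum.dagger import Dagger

assert Dagger(2 + 3*I) == 2 - 3*I                       # scalars: complex conjugate
assert Dagger(Matrix([[1, I]])) == Matrix([[1], [-I]])  # matrices: conjugate transpose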
|
python
|
sqlalchemy__sqlalchemy
|
test/sql/test_quote.py
|
{
"start": 31207,
"end": 34071
}
|
class ____(fixtures.TestBase):
def test_concat_quotetrue(self):
q1 = quoted_name("x", True)
self._assert_not_quoted("y" + q1)
def test_concat_quotefalse(self):
q1 = quoted_name("x", False)
self._assert_not_quoted("y" + q1)
def test_concat_quotenone(self):
q1 = quoted_name("x", None)
self._assert_not_quoted("y" + q1)
def test_rconcat_quotetrue(self):
q1 = quoted_name("x", True)
self._assert_not_quoted("y" + q1)
def test_rconcat_quotefalse(self):
q1 = quoted_name("x", False)
self._assert_not_quoted("y" + q1)
def test_rconcat_quotenone(self):
q1 = quoted_name("x", None)
self._assert_not_quoted("y" + q1)
def test_concat_anon(self):
q1 = _anonymous_label(quoted_name("x", True))
assert isinstance(q1, _anonymous_label)
value = q1 + "y"
assert isinstance(value, _anonymous_label)
self._assert_quoted(value, True)
def test_rconcat_anon(self):
q1 = _anonymous_label(quoted_name("x", True))
assert isinstance(q1, _anonymous_label)
value = "y" + q1
assert isinstance(value, _anonymous_label)
self._assert_quoted(value, True)
def test_coerce_quoted_switch(self):
q1 = quoted_name("x", False)
q2 = quoted_name(q1, True)
eq_(q2.quote, True)
def test_coerce_quoted_none(self):
q1 = quoted_name("x", False)
q2 = quoted_name(q1, None)
eq_(q2.quote, False)
def test_coerce_quoted_retain(self):
q1 = quoted_name("x", False)
q2 = quoted_name(q1, False)
eq_(q2.quote, False)
def test_coerce_none(self):
q1 = quoted_name.construct(None, False)
eq_(q1, None)
def test_apply_map_quoted(self):
q1 = _anonymous_label(quoted_name("x%s", True))
q2 = q1.apply_map("bar")
eq_(q2, "xbar")
eq_(q2.quote, True)
def test_apply_map_plain(self):
q1 = _anonymous_label(quoted_name("x%s", None))
q2 = q1.apply_map("bar")
eq_(q2, "xbar")
self._assert_not_quoted(q2)
def test_pickle_quote(self):
q1 = quoted_name("x", True)
for loads, dumps in picklers():
q2 = loads(dumps(q1))
eq_(str(q1), str(q2))
eq_(q1.quote, q2.quote)
def test_pickle_anon_label(self):
q1 = _anonymous_label(quoted_name("x", True))
for loads, dumps in picklers():
q2 = loads(dumps(q1))
assert isinstance(q2, _anonymous_label)
eq_(str(q1), str(q2))
eq_(q1.quote, q2.quote)
def _assert_quoted(self, value, quote):
assert isinstance(value, quoted_name)
eq_(value.quote, quote)
def _assert_not_quoted(self, value):
assert not isinstance(value, quoted_name)
|
QuotedIdentTest
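The core behaviour these tests pin down is that quoted_name is a str subclass carrying a .quote flag, and that plain concatenation drops it. A short sketch (assumes SQLAlchemy is installed):
from sqlalchemy.sql.elements import quoted_name

q = quoted_name("MixedCase", True)
assert isinstance(q, str) and q.quote
assert not isinstance("prefix_" + q, quoted_name)   # concatenation returns a plain str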
|
python
|
keras-team__keras
|
keras/src/layers/reshaping/zero_padding2d.py
|
{
"start": 287,
"end": 4646
}
|
class ____(Layer):
"""Zero-padding layer for 2D input (e.g. picture).
This layer can add rows and columns of zeros at the top, bottom, left and
right side of an image tensor.
Example:
>>> input_shape = (1, 1, 2, 2)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> x
[[[[0 1]
[2 3]]]]
>>> y = keras.layers.ZeroPadding2D(padding=1)(x)
>>> y
[[[[0 0]
[0 0]
[0 0]
[0 0]]
[[0 0]
[0 1]
[2 3]
[0 0]]
[[0 0]
[0 0]
[0 0]
[0 0]]]]
Args:
padding: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
- If int: the same symmetric padding is applied to height and width.
- If tuple of 2 ints: interpreted as two different symmetric padding
values for height and width:
`(symmetric_height_pad, symmetric_width_pad)`.
- If tuple of 2 tuples of 2 ints: interpreted as
`((top_pad, bottom_pad), (left_pad, right_pad))`.
data_format: A string, one of `"channels_last"` (default) or
`"channels_first"`. The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch_size, height, width, channels)` while `"channels_first"`
corresponds to inputs with shape
`(batch_size, channels, height, width)`.
When unspecified, uses `image_data_format` value found in your Keras
config file at `~/.keras/keras.json` (if exists). Defaults to
`"channels_last"`.
Input shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, height, width, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, height, width)`
Output shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, padded_height, padded_width, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, padded_height, padded_width)`
"""
def __init__(self, padding=(1, 1), data_format=None, **kwargs):
super().__init__(**kwargs)
self.data_format = backend.standardize_data_format(data_format)
if isinstance(padding, int):
self.padding = ((padding, padding), (padding, padding))
elif hasattr(padding, "__len__"):
if len(padding) != 2:
raise ValueError(
"`padding` should have two elements. "
f"Received: padding={padding}."
)
height_padding = argument_validation.standardize_tuple(
padding[0], 2, "1st entry of padding", allow_zero=True
)
width_padding = argument_validation.standardize_tuple(
padding[1], 2, "2nd entry of padding", allow_zero=True
)
self.padding = (height_padding, width_padding)
else:
raise ValueError(
"`padding` should be either an int, a tuple of 2 ints "
"(symmetric_height_crop, symmetric_width_crop), "
"or a tuple of 2 tuples of 2 ints "
"((top_crop, bottom_crop), (left_crop, right_crop)). "
f"Received: padding={padding}."
)
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
output_shape = list(input_shape)
spatial_dims_offset = 2 if self.data_format == "channels_first" else 1
for index in range(0, 2):
if output_shape[index + spatial_dims_offset] is not None:
output_shape[index + spatial_dims_offset] += (
self.padding[index][0] + self.padding[index][1]
)
return tuple(output_shape)
def call(self, inputs):
if self.data_format == "channels_first":
all_dims_padding = ((0, 0), (0, 0), *self.padding)
else:
all_dims_padding = ((0, 0), *self.padding, (0, 0))
return ops.pad(inputs, all_dims_padding)
def get_config(self):
config = {"padding": self.padding, "data_format": self.data_format}
base_config = super().get_config()
return {**base_config, **config}
|
ZeroPadding2D
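The shape arithmetic in compute_output_shape is easy to sanity-check with plain numpy; the following sketch (an illustration of the channels_last case, not the Keras code path) mirrors what call() does via ops.pad:
import numpy as np

x = np.arange(4).reshape(1, 1, 2, 2)            # (batch, height, width, channels)
pad = ((1, 1), (1, 1))                          # ((top, bottom), (left, right))
y = np.pad(x, ((0, 0), *pad, (0, 0)))           # zero-pad spatial dims only
assert y.shape == (1, 1 + 1 + 1, 2 + 1 + 1, 2)  # height/width grow by top+bottom / left+right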
|
python
|
facebookresearch__faiss
|
tests/test_index_accuracy.py
|
{
"start": 24173,
"end": 25463
}
|
class ____(unittest.TestCase):
def do_test(self, metric):
d = 32
xt, xb, xq = get_dataset_2(d, 2000, 1000, 200)
index1 = faiss.index_factory(d, "PQ4x4np", metric)
Dref, Iref = faiss.knn(xq, xb, 10, metric)
index1.train(xt)
index1.add(xb)
D1, I1 = index1.search(xq, 100)
recall1 = (I1 == Iref[:, :1]).sum()
# add refine index on top
index_flat = faiss.IndexFlat(d, metric)
index_flat.add(xb)
index2 = faiss.IndexRefine(index1, index_flat)
index2.k_factor = 10.0
D2, I2 = index2.search(xq, 10)
# check distance is computed properly
for i in range(len(xq)):
x1 = xq[i]
x2 = xb[I2[i, 5]]
if metric == faiss.METRIC_L2:
dref = ((x1 - x2) ** 2).sum()
else:
dref = np.dot(x1, x2)
np.testing.assert_almost_equal(dref, D2[i, 5], decimal=5)
# check that with refinement, the recall@10 is the same as
# the original recall@100
recall2 = (I2 == Iref[:, :1]).sum()
self.assertEqual(recall1, recall2)
def test_IP(self):
self.do_test(faiss.METRIC_INNER_PRODUCT)
def test_L2(self):
self.do_test(faiss.METRIC_L2)
|
TestRefine
|
python
|
ray-project__ray
|
rllib/algorithms/impala/impala_tf_policy.py
|
{
"start": 6181,
"end": 7894
}
|
class ____:
"""VTrace version of gradient computation logic."""
def __init__(self):
"""No special initialization required."""
pass
def compute_gradients_fn(
self, optimizer: LocalOptimizer, loss: TensorType
) -> ModelGradients:
# Supporting more than one loss/optimizer.
trainable_variables = self.model.trainable_variables()
if self.config["_tf_policy_handles_more_than_one_loss"]:
optimizers = force_list(optimizer)
losses = force_list(loss)
assert len(optimizers) == len(losses)
clipped_grads_and_vars = []
for optim, loss_ in zip(optimizers, losses):
grads_and_vars = optim.compute_gradients(loss_, trainable_variables)
clipped_g_and_v = []
for g, v in grads_and_vars:
if g is not None:
clipped_g, _ = tf.clip_by_global_norm(
[g], self.config["grad_clip"]
)
clipped_g_and_v.append((clipped_g[0], v))
clipped_grads_and_vars.append(clipped_g_and_v)
self.grads = [g for g_and_v in clipped_grads_and_vars for (g, v) in g_and_v]
# Only one optimizer and loss term.
else:
grads_and_vars = optimizer.compute_gradients(
loss, self.model.trainable_variables()
)
grads = [g for (g, v) in grads_and_vars]
self.grads, _ = tf.clip_by_global_norm(grads, self.config["grad_clip"])
clipped_grads_and_vars = list(zip(self.grads, trainable_variables))
return clipped_grads_and_vars
|
VTraceClipGradients
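grad_clip above is applied through tf.clip_by_global_norm; a numpy-only sketch of that operation (illustrative, and ignoring the None/IndexedSlices handling the real op performs):
import numpy as np

def clip_by_global_norm(grads, clip_norm):
    global_norm = np.sqrt(sum(np.sum(g ** 2) for g in grads))
    scale = clip_norm / max(global_norm, clip_norm)      # <= 1.0, so norms only shrink
    return [g * scale for g in grads], global_norm

clipped, norm = clip_by_global_norm([np.array([3.0, 4.0])], clip_norm=1.0)
assert np.isclose(norm, 5.0) and np.isclose(np.linalg.norm(clipped[0]), 1.0)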
|
python
|
streamlit__streamlit
|
lib/tests/streamlit/git_util_test.py
|
{
"start": 4798,
"end": 11692
}
|
class ____(unittest.TestCase):
def test_git_repo_invalid(self):
with patch("git.Repo") as mock:
mock.side_effect = InvalidGitRepositoryError("Not a git repo")
repo = GitRepo(".")
assert not repo.is_valid()
def test_old_git_version(self):
"""If the installed git is older than 2.7, certain repo operations
prompt the user for credentials. We don't want to do this, so
repo.is_valid() returns False for old gits.
"""
with (
patch("git.repo.base.Repo.GitCommandWrapperType") as git_mock,
patch("streamlit.git_util.os"),
):
git_mock.return_value.version_info = (1, 6, 4) # An old git version
repo = GitRepo(".")
assert not repo.is_valid()
assert repo.git_version == (1, 6, 4)
def test_git_repo_valid(self):
with (
patch("git.repo.base.Repo.GitCommandWrapperType") as git_mock,
patch("streamlit.git_util.os"),
):
git_mock.return_value.version_info = (2, 20, 3) # A recent git version
repo = GitRepo(".")
assert repo.is_valid()
assert repo.git_version == (2, 20, 3)
def test_gitpython_not_installed(self):
with patch.dict("sys.modules", {"git": None}):
repo = GitRepo(".")
assert not repo.is_valid()
def test_get_repo_info_https_userinfo(self) -> None:
"""Ensure get_repo_info extracts owner/repo from https with userinfo."""
with _mock_git_repo(
module_path="/repo/sub/module",
remote_urls=("https://user@github.com/owner/repo.git",),
) as gr:
assert gr.get_repo_info() == ("owner/repo", "main", "sub/module")
def test_get_repo_info_ssh_scp(self) -> None:
"""Ensure get_repo_info extracts owner/repo from scp-like ssh url."""
with _mock_git_repo(
module_path="/repo/sub/module",
remote_urls=("git@github.com:owner/repo.git",),
) as gr:
assert gr.get_repo_info() == ("owner/repo", "main", "sub/module")
def test_get_repo_info_no_tracking_branch(self) -> None:
"""Return None when there is no tracking branch configured."""
with _mock_git_repo(
module_path="/repo/sub/module", tracking_branch_name=None
) as gr:
assert gr.get_repo_info() is None
def test_get_repo_info_no_matching_remote_url(self) -> None:
"""Return None when remote URLs don't match GitHub."""
with _mock_git_repo(
module_path="/repo/sub/module",
remote_urls=("git@example.com:owner/repo.git",),
) as gr:
assert gr.get_repo_info() is None
def test_get_repo_info_head_detached(self) -> None:
"""Return None when HEAD is detached (no active branch)."""
with _mock_git_repo(module_path="/repo/sub/module", head_detached=True) as gr:
assert gr.get_repo_info() is None
def test_get_tracking_branch_remote_branch_with_slashes(self) -> None:
"""Branch names with slashes are preserved after the remote name segment."""
with _mock_git_repo(tracking_branch_name="origin/feature/foo/bar") as gr:
result = gr.get_tracking_branch_remote()
assert result is not None
_, branch = result
assert branch == "feature/foo/bar"
def test_get_tracking_branch_remote_missing_remote(self) -> None:
"""If the named remote cannot be resolved, return None."""
with _mock_git_repo(
tracking_branch_name="missing/main",
remote_exception=RuntimeError("remote not found"),
) as gr:
assert gr.get_tracking_branch_remote() is None
def test_ahead_commits_success(self) -> None:
"""ahead_commits returns commits compared to the remote branch."""
commit1 = object()
commit2 = object()
with _mock_git_repo(iter_commits=(commit1, commit2)) as gr:
assert gr.ahead_commits == [commit1, commit2]
def test_ahead_commits_no_tracking(self) -> None:
"""ahead_commits returns None when there's no tracking branch."""
with _mock_git_repo(tracking_branch_name=None) as gr:
assert gr.ahead_commits is None
def test_ahead_commits_iter_exception_returns_empty(self) -> None:
"""On errors iterating commits, ahead_commits returns an empty list."""
with _mock_git_repo(iter_commits_exc=RuntimeError("boom")) as gr:
assert gr.ahead_commits == []
def test_untracked_files_property(self) -> None:
"""untracked_files returns repo list when valid, else None."""
# valid repo
with _mock_git_repo(untracked_files=("a.txt", "b.txt")) as gr:
assert gr.untracked_files == ["a.txt", "b.txt"]
# invalid repo
with patch("git.Repo") as repo_ctor:
repo_ctor.side_effect = Exception("no repo")
gr = GitRepo("/repo")
assert gr.untracked_files is None
def test_uncommitted_files_property(self) -> None:
"""uncommitted_files returns index.diff(None) a_path entries; None if invalid."""
# valid repo
with _mock_git_repo(diff_paths=("x.py", "y.py")) as gr:
assert gr.uncommitted_files == ["x.py", "y.py"]
# invalid repo
with patch("git.Repo") as repo_ctor:
repo_ctor.side_effect = Exception("no repo")
gr = GitRepo("/repo")
assert gr.uncommitted_files is None
def test_is_head_detached_property(self) -> None:
"""is_head_detached reflects repo.head.is_detached when valid; False if invalid."""
# valid repo - attached
with _mock_git_repo(head_detached=False) as gr:
assert gr.is_head_detached is False
# valid repo - detached
with _mock_git_repo(head_detached=True) as gr:
assert gr.is_head_detached is True
# invalid repo
with patch("git.Repo") as repo_ctor:
repo_ctor.side_effect = Exception("no repo")
gr = GitRepo("/repo")
assert gr.is_head_detached is False
def test_tracking_branch_property(self) -> None:
"""tracking_branch returns None for invalid or detached HEAD; else value."""
# valid repo, attached head
with _mock_git_repo() as gr:
# When not detached and tracking is configured, property should be truthy
assert gr.tracking_branch is not None
# valid repo, detached head
with _mock_git_repo(head_detached=True) as gr:
assert gr.tracking_branch is None
# invalid repo
with patch("git.Repo") as repo_ctor:
repo_ctor.side_effect = Exception("no repo")
gr = GitRepo("/repo")
assert gr.tracking_branch is None
|
GitUtilTest
|
python
|
kamyu104__LeetCode-Solutions
|
Python/maximum-area-rectangle-with-point-constraints-i.py
|
{
"start": 1588,
"end": 2302
}
|
class ____(object):
def maxRectangleArea(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
result = -1
points.sort()
for i in xrange(len(points)-3):
if points[i][0] != points[i+1][0]:
continue
j = next((j for j in xrange(i+2, len(points)-1) if points[i][1] <= points[j][1] <= points[i+1][1]), len(points)-1)
if j == len(points)-1 or not (points[j][0] == points[j+1][0] and points[i][1] == points[j][1] and points[i+1][1] == points[j+1][1]):
continue
result = max(result, (points[i+1][1]-points[i][1])*(points[j][0]-points[i][0]))
return result
|
Solution2
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_20/tasks.py
|
{
"start": 342152,
"end": 343283
}
|
class ____(Response):
"""
Response of tasks.make_public endpoint.
:param updated: Number of tasks updated
:type updated: int
"""
_service = "tasks"
_action = "make_public"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"updated": {
"description": "Number of tasks updated",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None:
super(MakePublicResponse, self).__init__(**kwargs)
self.updated = updated
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
|
MakePublicResponse
|
python
|
ray-project__ray
|
python/ray/serve/tests/unit/test_grpc_util.py
|
{
"start": 503,
"end": 4367
}
|
class ____:
def __init__(self):
self.address = None
def add_insecure_port(self, address):
self.address = address
def fake_service_handler_factory(service_method: str, stream: bool) -> Callable:
def foo() -> bytes:
return f"{'stream' if stream else 'unary'} call from {service_method}".encode()
return foo
def test_grpc_server():
"""Test `gRPCGenericServer` did the correct overrides.
When a add_servicer_to_server function is called on an instance of `gRPCGenericServer`,
it correctly overrides `response_serializer` to None, and `unary_unary` and
`unary_stream` to be generated from the factory function.
"""
service_name = "ray.serve.ServeAPIService"
method_name = "ServeRoutes"
def add_test_servicer_to_server(servicer, server):
rpc_method_handlers = {
method_name: grpc.unary_unary_rpc_method_handler(
servicer.ServeRoutes,
request_deserializer=AnyProto.FromString,
response_serializer=AnyProto.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
service_name, rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
grpc_server = gRPCGenericServer(fake_service_handler_factory)
dummy_servicer = Mock()
# Ensure `generic_rpc_handlers` is not populated before calling
# the add_servicer_to_server function.
assert grpc_server.generic_rpc_handlers == []
add_test_servicer_to_server(dummy_servicer, grpc_server)
# `generic_rpc_handlers` should be populated after add_servicer_to_server is called.
assert len(grpc_server.generic_rpc_handlers) == 1
# The populated rpc handler should have the correct service name.
rpc_handler = grpc_server.generic_rpc_handlers[0][0]
assert rpc_handler.service_name() == service_name
# The populated method handlers should have the correct response_serializer,
# unary_unary, and unary_stream.
service_method = f"/{service_name}/{method_name}"
method_handlers = rpc_handler._method_handlers.get(service_method)
assert method_handlers.response_serializer is None
assert method_handlers.unary_unary() == f"unary call from {service_method}".encode()
assert (
method_handlers.unary_stream() == f"stream call from {service_method}".encode()
)
def test_ray_serve_grpc_context_serializable():
"""RayServegRPCContext should be serializable."""
context = RayServegRPCContext(FakeGrpcContext())
pickled_context = pickle.dumps(context)
deserialized_context = pickle.loads(pickled_context)
assert deserialized_context.__dict__ == context.__dict__
cloudpickled_context = cloudpickle.dumps(context)
deserialized_context = pickle.loads(cloudpickled_context)
assert deserialized_context.__dict__ == context.__dict__
def test_add_grpc_address():
"""Test `add_grpc_address` adds the address to the gRPC server."""
fake_grpc_server = FakeGrpcServer()
grpc_address = "fake_address:50051"
assert fake_grpc_server.address is None
add_grpc_address(fake_grpc_server, grpc_address)
assert fake_grpc_server.address == grpc_address
def test_get_grpc_response_status_backpressure_error():
"""Test that BackPressureError returns RESOURCE_EXHAUSTED status."""
backpressure_error = BackPressureError(
num_queued_requests=10, max_queued_requests=5
)
status = get_grpc_response_status(
exc=backpressure_error, request_timeout_s=30.0, request_id="test_request_123"
)
assert status.code == grpc.StatusCode.RESOURCE_EXHAUSTED
assert status.is_error is True
assert status.message == backpressure_error.message
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
|
FakeGrpcServer
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_cascade.py
|
{
"start": 102736,
"end": 107686
}
|
class ____(fixtures.MappedTest):
@classmethod
def define_tables(self, metadata):
Table(
"core",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("related_one_id", Integer, ForeignKey("related_one.id")),
Column("related_two_id", Integer, ForeignKey("related_two.id")),
)
Table(
"related_one",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
)
Table(
"related_two",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
)
def _fixture(
self,
legacy_is_orphan,
persistent,
r1_present,
r2_present,
detach_event=True,
):
class Core:
pass
class RelatedOne:
def __init__(self, cores):
self.cores = cores
class RelatedTwo:
def __init__(self, cores):
self.cores = cores
self.mapper_registry.map_imperatively(
Core, self.tables.core, legacy_is_orphan=legacy_is_orphan
)
self.mapper_registry.map_imperatively(
RelatedOne,
self.tables.related_one,
properties={
"cores": relationship(
Core, cascade="all, delete-orphan", backref="r1"
)
},
)
self.mapper_registry.map_imperatively(
RelatedTwo,
self.tables.related_two,
properties={
"cores": relationship(
Core, cascade="all, delete-orphan", backref="r2"
)
},
)
c1 = Core()
if detach_event:
RelatedOne(cores=[c1])
RelatedTwo(cores=[c1])
else:
if r1_present:
RelatedOne(cores=[c1])
if r2_present:
RelatedTwo(cores=[c1])
if persistent:
s = fixture_session()
s.add(c1)
s.flush()
if detach_event:
if not r1_present:
c1.r1 = None
if not r2_present:
c1.r2 = None
return c1
def _assert_not_orphan(self, c1):
mapper = object_mapper(c1)
state = instance_state(c1)
assert not mapper._is_orphan(state)
def _assert_is_orphan(self, c1):
mapper = object_mapper(c1)
state = instance_state(c1)
assert mapper._is_orphan(state)
def test_leg_pers_r1_r2(self):
c1 = self._fixture(True, True, True, True)
self._assert_not_orphan(c1)
def test_current_pers_r1_r2(self):
c1 = self._fixture(False, True, True, True)
self._assert_not_orphan(c1)
def test_leg_pers_r1_notr2(self):
c1 = self._fixture(True, True, True, False)
self._assert_not_orphan(c1)
def test_current_pers_r1_notr2(self):
c1 = self._fixture(False, True, True, False)
self._assert_is_orphan(c1)
def test_leg_pers_notr1_notr2(self):
c1 = self._fixture(True, True, False, False)
self._assert_is_orphan(c1)
def test_current_pers_notr1_notr2(self):
c1 = self._fixture(False, True, True, False)
self._assert_is_orphan(c1)
def test_leg_transient_r1_r2(self):
c1 = self._fixture(True, False, True, True)
self._assert_not_orphan(c1)
def test_current_transient_r1_r2(self):
c1 = self._fixture(False, False, True, True)
self._assert_not_orphan(c1)
def test_leg_transient_r1_notr2(self):
c1 = self._fixture(True, False, True, False)
self._assert_not_orphan(c1)
def test_current_transient_r1_notr2(self):
c1 = self._fixture(False, False, True, False)
self._assert_is_orphan(c1)
def test_leg_transient_notr1_notr2(self):
c1 = self._fixture(True, False, False, False)
self._assert_is_orphan(c1)
def test_current_transient_notr1_notr2(self):
c1 = self._fixture(False, False, False, False)
self._assert_is_orphan(c1)
def test_leg_transient_notr1_notr2_noevent(self):
c1 = self._fixture(True, False, False, False, False)
self._assert_is_orphan(c1)
def test_current_transient_notr1_notr2_noevent(self):
c1 = self._fixture(False, False, False, False, False)
self._assert_is_orphan(c1)
def test_leg_persistent_notr1_notr2_noevent(self):
c1 = self._fixture(True, True, False, False, False)
self._assert_not_orphan(c1)
def test_current_persistent_notr1_notr2_noevent(self):
c1 = self._fixture(False, True, False, False, False)
self._assert_not_orphan(c1)
|
OrphanCriterionTest
|
python
|
pandas-dev__pandas
|
pandas/core/resample.py
|
{
"start": 70662,
"end": 70914
}
|
class ____( # type: ignore[misc]
_GroupByMixin, PeriodIndexResampler
):
"""
Provides a resample of a groupby implementation.
"""
@property
def _resampler_cls(self):
return PeriodIndexResampler
|
PeriodIndexResamplerGroupby
|
python
|
cherrypy__cherrypy
|
cherrypy/process/wspbus.py
|
{
"start": 3575,
"end": 4579
}
|
class ____(Exception):
"""Exception raised during errors on Bus.publish()."""
delimiter = '\n'
def __init__(self, *args, **kwargs):
"""Initialize ChannelFailures errors wrapper."""
super(ChannelFailures, self).__init__(*args, **kwargs)
self._exceptions = list()
def handle_exception(self):
"""Append the current exception to self."""
self._exceptions.append(sys.exc_info()[1])
def get_instances(self):
"""Return a list of seen exception instances."""
return self._exceptions[:]
def __str__(self):
"""Render the list of errors, which happened in channel."""
exception_strings = map(repr, self.get_instances())
return self.delimiter.join(exception_strings)
__repr__ = __str__
def __bool__(self):
"""Determine whether any error happened in channel."""
return bool(self._exceptions)
__nonzero__ = __bool__
# Use a flag to indicate the state of the bus.
|
ChannelFailures
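The class above is an aggregate-then-raise helper: the bus keeps invoking listeners, remembers every exception, and raises once at the end if anything failed. A minimal stand-in sketch of that pattern (plain Python, not the cherrypy Bus):
import sys

class Failures(Exception):                 # stand-in for ChannelFailures
    def __init__(self):
        super().__init__()
        self._excs = []
    def handle_exception(self):
        self._excs.append(sys.exc_info()[1])
    def __bool__(self):
        return bool(self._excs)

def publish(listeners):
    failures = Failures()
    for listener in listeners:
        try:
            listener()
        except Exception:
            failures.handle_exception()    # keep going, remember the error
    if failures:
        raise failures

publish([lambda: None])                    # all listeners succeed: nothing raised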
|
python
|
pyca__cryptography
|
src/cryptography/hazmat/_oid.py
|
{
"start": 9196,
"end": 9949
}
|
class ____:
SERVER_AUTH = ObjectIdentifier("1.3.6.1.5.5.7.3.1")
CLIENT_AUTH = ObjectIdentifier("1.3.6.1.5.5.7.3.2")
CODE_SIGNING = ObjectIdentifier("1.3.6.1.5.5.7.3.3")
EMAIL_PROTECTION = ObjectIdentifier("1.3.6.1.5.5.7.3.4")
TIME_STAMPING = ObjectIdentifier("1.3.6.1.5.5.7.3.8")
OCSP_SIGNING = ObjectIdentifier("1.3.6.1.5.5.7.3.9")
ANY_EXTENDED_KEY_USAGE = ObjectIdentifier("2.5.29.37.0")
SMARTCARD_LOGON = ObjectIdentifier("1.3.6.1.4.1.311.20.2.2")
KERBEROS_PKINIT_KDC = ObjectIdentifier("1.3.6.1.5.2.3.5")
IPSEC_IKE = ObjectIdentifier("1.3.6.1.5.5.7.3.17")
BUNDLE_SECURITY = ObjectIdentifier("1.3.6.1.5.5.7.3.35")
CERTIFICATE_TRANSPARENCY = ObjectIdentifier("1.3.6.1.4.1.11129.2.4.4")
|
ExtendedKeyUsageOID
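These constants are typically consumed through the public x509 namespace when building an ExtendedKeyUsage extension; a short usage sketch (assumes the cryptography package is installed):
from cryptography import x509
from cryptography.x509.oid import ExtendedKeyUsageOID

eku = x509.ExtendedKeyUsage(
    [ExtendedKeyUsageOID.SERVER_AUTH, ExtendedKeyUsageOID.CLIENT_AUTH]
)
assert ExtendedKeyUsageOID.SERVER_AUTH.dotted_string == "1.3.6.1.5.5.7.3.1"
assert ExtendedKeyUsageOID.SERVER_AUTH in eku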
|
python
|
apache__airflow
|
providers/databricks/tests/unit/databricks/hooks/test_databricks.py
|
{
"start": 51546,
"end": 52730
}
|
class ____:
"""
Tests for DatabricksHook.
"""
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(
conn_id=DEFAULT_CONN_ID,
conn_type="databricks",
host=HOST,
login=None,
password=TOKEN,
extra=None,
)
)
self.hook = DatabricksHook(retry_delay=0)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_submit_run(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.post.return_value.json.return_value = {"run_id": "1"}
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.post.return_value).status_code = status_code_mock
data = {"notebook_task": NOTEBOOK_TASK, "new_cluster": NEW_CLUSTER}
run_id = self.hook.submit_run(data)
assert run_id == "1"
args = mock_requests.post.call_args
kwargs = args[1]
assert kwargs["auth"].token == TOKEN
@pytest.mark.db_test
|
TestDatabricksHookTokenInPassword
|
python
|
PyCQA__pylint
|
tests/functional/f/function_redefined.py
|
{
"start": 455,
"end": 2025
}
|
class ____: # [function-redefined]
"""docstring"""
def __init__(self):
pass
def yeah(self):
"""hehehe"""
def yoo(self):
"""yoo"""
def func1():
"""docstring"""
def func2():
"""docstring"""
def func2(): # [function-redefined]
"""docstring"""
__revision__ = 1 # [redefined-outer-name]
return __revision__
if __revision__:
def exclusive_func():
"docstring"
else:
def exclusive_func():
"docstring"
try:
def exclusive_func2():
"docstring"
except TypeError:
def exclusive_func2():
"docstring"
else:
def exclusive_func2(): # [function-redefined]
"this one redefine the one defined line 42"
def with_inner_function_1():
"""docstring"""
def callback():
"""callback docstring"""
pass
return callback
def with_inner_function_2():
"""docstring"""
def callback():
"""does not redefine callback returned by with_inner_function_1"""
pass
return callback
def some_func():
"""Don't emit if we defined a variable with the same name as a
__future__ directive.
"""
division = 2
return division
def dummy_func():
"""First dummy function"""
pass
def dummy_func2():
"""Second dummy function, don't emit function-redefined message
because of the dummy name"""
pass
from math import ceil
def ceil(): # [function-redefined]
pass
import math
def math(): # [function-redefined]
pass
import math as _
def fun():
pass
# pylint: disable=too-few-public-methods
|
AAAA
|
python
|
tensorflow__tensorflow
|
tensorflow/python/client/session_clusterspec_prop_test.py
|
{
"start": 1754,
"end": 23373
}
|
class ____(test_util.TensorFlowTestCase):
def testClusterSpecPropagationSimple(self):
server1 = server_lib.Server.create_local_server()
server2 = server_lib.Server.create_local_server()
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = 'worker'
job.tasks[0] = server1.target[len('grpc://'):]
job.tasks[1] = server2.target[len('grpc://'):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
const = constant_op.constant(17)
sess = session.Session(server1.target, config=config)
output = self.evaluate(const)
self.assertEqual(17, output)
def testClusterSpecPropagationWorker2Placement(self):
server1 = server_lib.Server.create_local_server()
server2 = server_lib.Server.create_local_server()
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = 'worker'
job.tasks[0] = server1.target[len('grpc://'):]
job.tasks[1] = server2.target[len('grpc://'):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
with ops.Graph().as_default() as g, ops.device('/job:worker/task:1'):
with ops.device('/cpu:0'):
const = constant_op.constant(17)
sess = session.Session(server1.target, config=config, graph=g)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
output = sess.run(const, options=run_options, run_metadata=run_metadata)
self.assertEqual(17, output)
self.assertEqual(1,
len([
node_stats
for dev_stats in run_metadata.step_stats.dev_stats
for node_stats in dev_stats.node_stats
if '/job:worker/replica:0/task:1/device:CPU:0' ==
dev_stats.device and 'Const' == node_stats.node_name
]))
def testClusterSpecPropagationWorker1Placement(self):
server1 = server_lib.Server.create_local_server()
server2 = server_lib.Server.create_local_server()
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = 'worker'
job.tasks[0] = server1.target[len('grpc://'):]
job.tasks[1] = server2.target[len('grpc://'):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
with ops.Graph().as_default() as g, ops.device('/job:worker/task:0'):
const = constant_op.constant(17)
with session.Session(server1.target, config=config, graph=g):
output = self.evaluate(const)
self.assertEqual(17, output)
def testCanonicalDeviceNames(self):
server1 = server_lib.Server.create_local_server()
server2 = server_lib.Server.create_local_server()
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = 'worker'
job.tasks[0] = server1.target[len('grpc://'):]
job.tasks[1] = server2.target[len('grpc://'):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
with ops.Graph().as_default() as g, ops.device(
'/job:worker/task:1/device:CPU:0'):
const = constant_op.constant(17)
sess = session.Session(server1.target, config=config, graph=g)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
output = sess.run(const, options=run_options, run_metadata=run_metadata)
self.assertEqual(17, output)
self.assertEqual(1,
len([
node_stats
for dev_stats in run_metadata.step_stats.dev_stats
for node_stats in dev_stats.node_stats
if '/job:worker/replica:0/task:1/device:CPU:0' ==
dev_stats.device and 'Const' == node_stats.node_name
]))
def testFullDeviceNames(self):
server1 = server_lib.Server.create_local_server()
server2 = server_lib.Server.create_local_server()
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = 'renamed_worker'
job.tasks[0] = server1.target[len('grpc://'):]
job.tasks[1] = server2.target[len('grpc://'):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
with ops.Graph().as_default() as g, ops.device(
'/job:renamed_worker/replica:0/task:1/device:CPU:0'):
const = constant_op.constant(17)
sess = session.Session(server1.target, config=config, graph=g)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
output = sess.run(const, options=run_options, run_metadata=run_metadata)
self.assertEqual(17, output)
self.assertEqual(1,
len([
node_stats
for dev_stats in run_metadata.step_stats.dev_stats
for node_stats in dev_stats.node_stats
if '/job:renamed_worker/replica:0/task:1/device:CPU:0'
== dev_stats.device and 'Const' == node_stats.node_name
]))
def testMultipleLocalDevices(self):
# Note: CPU->CPU transfers have a fast-path in
# BaseRemoteRendezvous::SameWorkerRecvDone that means the test doesn't
# actually capture the motivating bug unless run on a GPU machine.
#
# Example error message (before bugfix -- line breaks added because lint):
#
# W0718 17:14:41.521534 190121 device_mgr.cc:107] Unknown device:
# /job:worker/replica:0/task:0/device:CPU:0 all devices:
# /job:local/replica:0/task:0/device:GPU:0,
# /job:local/replica:0/task:0/device:GPU:0,
# /job:local/replica:0/task:0/cpu:1, CPU:0, GPU:0,
# /job:local/replica:0/task:0/device:CPU:1,
# /job:local/replica:0/task:0/device:CPU:0, CPU:1,
# /job:local/replica:0/task:0/cpu:0
server_config = config_pb2.ConfigProto(device_count={'CPU': 2})
server1 = server_lib.Server.create_local_server(config=server_config)
server2 = server_lib.Server.create_local_server(config=server_config)
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = 'worker'
job.tasks[0] = server1.target[len('grpc://'):]
job.tasks[1] = server2.target[len('grpc://'):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
with ops.Graph().as_default() as g:
with ops.device('/job:worker/task:1/cpu:1'):
input1 = constant_op.constant(17, dtypes.float32)
with ops.device('/job:worker/task:0/cpu:1'):
input2 = constant_op.constant(3, dtypes.float32)
with ops.device('/job:worker/task:1/cpu:0'):
sum1 = input1 + input2
if test.is_gpu_available():
device_str = '/job:worker/task:0/device:GPU:0'
else:
device_str = '/job:worker/task:0/cpu:1'
with ops.device(device_str):
sum2 = input2 + input1
with ops.device('/job:worker/task:0/cpu:0'):
sum3 = sum1 + sum2
with session.Session(server1.target, config=config, graph=g):
output = self.evaluate(sum3)
self.assertEqual(40, output)
def testLegacyDeviceNames(self):
server1 = server_lib.Server.create_local_server()
server2 = server_lib.Server.create_local_server()
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = 'worker'
job.tasks[0] = server1.target[len('grpc://'):]
job.tasks[1] = server2.target[len('grpc://'):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
with ops.Graph().as_default() as g, ops.device('/job:worker/task:1/cpu:0'):
const = constant_op.constant(17)
sess = session.Session(server1.target, config=config, graph=g)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
output = sess.run(const, options=run_options, run_metadata=run_metadata)
self.assertEqual(17, output)
self.assertEqual(1,
len([
node_stats
for dev_stats in run_metadata.step_stats.dev_stats
for node_stats in dev_stats.node_stats
if '/job:worker/replica:0/task:1/device:CPU:0' ==
dev_stats.device and 'Const' == node_stats.node_name
]))
def testClusterSpecPropagationThreeServers2Graphs(self):
"""Boots 3 servers, creates 2 sessions, ensures appropriate operations.
We create 2 clusterspecs:
1. server2 as the master, server1 as a worker
2. server2 as the master, server3 as a worker
We ensure that variables on the workers are independent.
"""
server1 = server_lib.Server.create_local_server()
server2 = server_lib.Server.create_local_server()
server3 = server_lib.Server.create_local_server()
cluster_def1 = cluster_pb2.ClusterDef()
job1 = cluster_def1.job.add()
job1.name = 'worker1'
job1.tasks[0] = server2.target[len('grpc://'):]
job1.tasks[1] = server1.target[len('grpc://'):]
cluster_def2 = cluster_pb2.ClusterDef()
job2 = cluster_def2.job.add()
job2.name = 'worker2'
job2.tasks[0] = server2.target[len('grpc://'):]
job2.tasks[1] = server3.target[len('grpc://'):]
config1 = config_pb2.ConfigProto(cluster_def=cluster_def1)
config2 = config_pb2.ConfigProto(cluster_def=cluster_def2)
with ops.Graph().as_default() as g1:
with ops.device('/job:worker1/task:1'):
var1 = variables.Variable(array_ops.zeros([2]), name='var1')
update_op1 = state_ops.assign_add(
var1, array_ops.ones([2]), name='var1_assign_add')
init1 = variables.global_variables_initializer()
with ops.Graph().as_default() as g2:
with ops.device('/job:worker2/task:1'):
var2 = variables.Variable(array_ops.zeros([2]), name='var2')
update_op2 = state_ops.assign_add(
var2, array_ops.ones([2]), name='var2_assign_add')
init2 = variables.global_variables_initializer()
sess1 = session.Session(server2.target, graph=g1, config=config1)
sess2 = session.Session(server2.target, graph=g2, config=config2)
init1.run(session=sess1)
init2.run(session=sess2)
expected_zeros = np.zeros([2])
expected_ones = np.ones([2])
self.assertAllEqual(expected_zeros, sess1.run(var1))
self.assertAllEqual(expected_zeros, sess2.run(var2))
self.assertAllEqual(expected_ones, sess1.run(update_op1))
self.assertAllEqual(expected_ones, sess1.run(var1))
self.assertAllEqual(expected_zeros, sess2.run(var2))
self.assertAllEqual(expected_ones, sess2.run(update_op2))
self.assertAllEqual(expected_ones + expected_ones, sess1.run(update_op1))
self.assertAllEqual(expected_ones, sess2.run(var2))
self.assertAllEqual(expected_ones + expected_ones, sess1.run(var1))
def testClusterSpecPropagationThreeServers(self):
"""Boots 3 servers, creates 2 sessions, ensures appropriate operations.
We create 2 clusterspecs:
1. server2 as the master, server1 as a worker
2. server2 as the master, server3 as a worker
We ensure that variables on the workers are independent.
"""
server1 = server_lib.Server.create_local_server()
server2 = server_lib.Server.create_local_server()
server3 = server_lib.Server.create_local_server()
cluster_def1 = cluster_pb2.ClusterDef()
job1 = cluster_def1.job.add()
job1.name = 'worker'
job1.tasks[0] = server2.target[len('grpc://'):]
job1.tasks[1] = server1.target[len('grpc://'):]
cluster_def2 = cluster_pb2.ClusterDef()
job2 = cluster_def2.job.add()
job2.name = 'worker'
job2.tasks[0] = server2.target[len('grpc://'):]
job2.tasks[1] = server3.target[len('grpc://'):]
config1 = config_pb2.ConfigProto(cluster_def=cluster_def1)
config2 = config_pb2.ConfigProto(cluster_def=cluster_def2)
with ops.device('/job:worker/task:1'):
var = variables.Variable(array_ops.zeros([2]), name='var')
feed = array_ops.placeholder(dtypes.float32, shape=(2))
update_op = var.assign_add(feed)
sess1 = session.Session(server2.target, config=config1)
sess2 = session.Session(server2.target, config=config2)
variables.global_variables_initializer().run(session=sess1)
variables.global_variables_initializer().run(session=sess2)
expected_zeros = np.zeros([2])
expected_ones = np.ones([2])
self.assertAllEqual(expected_zeros, sess1.run(var))
self.assertAllEqual(expected_zeros, sess2.run(var))
self.assertAllEqual(expected_ones,
sess1.run(update_op, feed_dict={feed: expected_ones}))
self.assertAllEqual(expected_ones, sess1.run(var))
self.assertAllEqual(expected_zeros, sess2.run(var))
self.assertAllEqual(expected_ones,
sess2.run(update_op, feed_dict={feed: expected_ones}))
self.assertAllEqual(expected_ones + expected_ones,
sess1.run(update_op, feed_dict={feed: expected_ones}))
self.assertAllEqual(expected_ones, sess2.run(var))
self.assertAllEqual(expected_ones + expected_ones, sess1.run(var))
def testClusterSpecPropagationThreeServersOneCluster(self):
"""Boots 3 servers, ensures appropriate communication across workers.
Additionally, in this cluster, we ensure the master is not the 0-th worker.
Note: this test only uses one session.
"""
server1 = server_lib.Server.create_local_server()
server2 = server_lib.Server.create_local_server()
server3 = server_lib.Server.create_local_server()
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = 'worker'
job.tasks[0] = server3.target[len('grpc://'):]
job.tasks[1] = server2.target[len('grpc://'):]
job.tasks[2] = server1.target[len('grpc://'):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
# Add ops to the devices in non-linear order.
with ops.device('/job:worker/task:1'):
feed1 = array_ops.placeholder(dtypes.float32, shape=(2))
const1 = constant_op.constant(2.0)
mul1 = const1 * feed1
with ops.device('/job:worker/task:2'):
feed2 = array_ops.placeholder(dtypes.float32, shape=(2))
const2 = constant_op.constant(2.0)
mul2 = const2 * feed2
with ops.device('/job:worker/task:0'):
feed0 = array_ops.placeholder(dtypes.float32, shape=(2))
const0 = constant_op.constant(2.0)
mul0 = const0 * feed0
sum_op = mul0 + mul1 + mul2
ones = np.ones([2])
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
# Run!
with session.Session(server1.target, config=config) as sess:
output = sess.run(
sum_op,
options=run_options,
run_metadata=run_metadata,
feed_dict={feed1: ones,
feed2: ones,
feed0: ones})
self.assertAllEqual(6 * ones, output)
self.assertEqual(
3,
len([
dev_stats.device
for dev_stats in run_metadata.step_stats.dev_stats
for node_stats in dev_stats.node_stats
if '/job:worker/replica:0/task:' in dev_stats.device and
node_stats.node_name.startswith('Const')
]), run_metadata)
def testClusterSpecPropagationIsolation(self):
"""Test that two sessions using ClusterSpec propagation are isolated."""
server = server_lib.Server.create_local_server()
init_value = array_ops.placeholder(dtypes.int32, shape=[])
v = variables.Variable(init_value)
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = 'worker'
job.tasks[0] = server.target[len('grpc://'):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
sess1 = session.Session(server.target, config=config)
sess2 = session.Session(server.target, config=config)
# Initially, the variable is uninitialized in both sessions.
with self.assertRaises(errors.FailedPreconditionError):
sess1.run(v)
with self.assertRaises(errors.FailedPreconditionError):
sess2.run(v)
# An update in sess1 should be visible in sess1 only.
sess1.run(v.initializer, feed_dict={init_value: 37})
self.assertEqual(37, sess1.run(v))
with self.assertRaises(errors.FailedPreconditionError):
sess2.run(v)
# An update in sess2 should be visible in sess2 only.
sess2.run(v.initializer, feed_dict={init_value: 86})
self.assertEqual(37, sess1.run(v))
self.assertEqual(86, sess2.run(v))
# Closing sess2 has no effect on the state of sess1.
sess2.close()
self.assertEqual(37, sess1.run(v))
# Subsequent sessions will not see the state of existing sessions.
sess3 = session.Session(server.target, config=config)
self.assertEqual(37, sess1.run(v))
with self.assertRaises(errors.FailedPreconditionError):
sess3.run(v)
def testClusterSpecPropagationNonIsolation(self):
"""Test that two sessions using ClusterSpec propagation shares state.
For example, the updated Variable value are visible among all worker
sessions registered in the same server.
"""
server = server_lib.Server.create_local_server()
init_value = array_ops.placeholder(dtypes.int32, shape=[])
v = variables.Variable(init_value)
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = 'worker'
job.tasks[0] = server.target[len('grpc://'):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
config.experimental.share_session_state_in_clusterspec_propagation = True
sess1 = session.Session(server.target, config=config)
sess2 = session.Session(server.target, config=config)
# Initially, the variable is uninitialized in both sessions.
with self.assertRaises(errors.FailedPreconditionError):
sess1.run(v)
with self.assertRaises(errors.FailedPreconditionError):
sess2.run(v)
# An update in sess1 should be visible in sess2.
sess1.run(v.initializer, feed_dict={init_value: 37})
self.assertEqual(37, sess1.run(v))
self.assertEqual(37, sess2.run(v))
# Closing sess2 has no effect on the state of sess1.
sess2.close()
self.assertEqual(37, sess1.run(v))
# Subsequent sessions should see the state of existing sessions.
sess3 = session.Session(server.target, config=config)
self.assertEqual(37, sess1.run(v))
self.assertEqual(37, sess3.run(v))
def testClusterSpecPropagationNonIsolation2Graphs(self):
"""Creates 2 sessions with each own graph, ensures appropriate operations.
We ensure that variables on the workers shares state.
"""
server = server_lib.Server.create_local_server()
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = 'worker'
job.tasks[0] = server.target[len('grpc://'):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
config.experimental.share_session_state_in_clusterspec_propagation = True
with ops.Graph().as_default() as g1:
var1 = variables.Variable(array_ops.zeros([2]), name='var')
update_op1 = state_ops.assign_add(
var1, array_ops.ones([2]), name='var1_assign_add')
init1 = variables.global_variables_initializer()
with ops.Graph().as_default() as g2:
var2 = variables.Variable(array_ops.zeros([2]), name='var')
update_op2 = state_ops.assign_add(
var2, array_ops.ones([2]), name='var2_assign_add')
sess1 = session.Session(server.target, graph=g1, config=config)
sess2 = session.Session(server.target, graph=g2, config=config)
expected_zeros = np.zeros([2])
expected_ones = np.ones([2])
init1.run(session=sess1)
self.assertAllEqual(expected_zeros, sess1.run(var1))
self.assertAllEqual(expected_zeros, sess2.run(var2))
self.assertAllEqual(expected_ones, sess1.run(update_op1))
self.assertAllEqual(expected_ones, sess1.run(var1))
self.assertAllEqual(expected_ones, sess2.run(var2))
self.assertAllEqual(expected_ones + expected_ones, sess2.run(update_op2))
self.assertAllEqual(expected_ones + expected_ones, sess2.run(var2))
self.assertAllEqual(expected_ones + expected_ones, sess1.run(var1))
def testClusterSpecPropagationPartialRun(self):
"""Test successful partial run with ClusterSpec propagation."""
server1 = server_lib.Server.create_local_server()
server2 = server_lib.Server.create_local_server()
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = 'worker'
job.tasks[0] = server1.target[len('grpc://'):]
job.tasks[1] = server2.target[len('grpc://'):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
with ops.device('/job:worker/task:0'):
a = array_ops.placeholder(dtypes.float32, shape=[])
with ops.device('/job:worker/task:1'):
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
with ops.device('/job:worker/task:0'):
r2 = math_ops.multiply(r1, c)
with session.Session(server1.target, config=config) as sess:
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
res = sess.partial_run(h, r2, feed_dict={c: 3})
self.assertEqual(9, res)
if __name__ == '__main__':
googletest.main()
|
SessionClusterSpecPropagationTest
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/classes/generative.py
|
{
"start": 9597,
"end": 10476
}
|
class ____(_GenerativeConfigRuntime):
generative: Union[GenerativeSearches, _EnumLikeStr] = Field(
default=GenerativeSearches.NVIDIA, frozen=True, exclude=True
)
base_url: Optional[AnyHttpUrl]
max_tokens: Optional[int]
model: Optional[str]
temperature: Optional[float]
top_p: Optional[float]
def _to_grpc(self, opts: _GenerativeConfigRuntimeOptions) -> generative_pb2.GenerativeProvider:
self._validate_multi_modal(opts)
return generative_pb2.GenerativeProvider(
return_metadata=opts.return_metadata,
nvidia=generative_pb2.GenerativeNvidia(
base_url=_parse_anyhttpurl(self.base_url),
max_tokens=self.max_tokens,
model=self.model,
temperature=self.temperature,
top_p=self.top_p,
),
)
|
_GenerativeNvidia
|
python
|
rq__rq
|
tests/test_job_dependency.py
|
{
"start": 349,
"end": 23896
}
|
class ____(RQTestCase):
def test_dependency_parameter_constraints(self):
"""Ensures the proper constraints are in place for values passed in as job references."""
dep_job = Job.create(func=fixtures.say_hello, connection=self.connection)
# raise error on empty jobs
self.assertRaises(ValueError, Dependency, jobs=[])
# raise error on non-str/Job value in jobs iterable
self.assertRaises(ValueError, Dependency, jobs=[dep_job, 1])
def test_multiple_dependencies_are_accepted_and_persisted(self):
"""Ensure job._dependency_ids accepts different input formats, and
is set and restored properly"""
job_A = Job.create(func=fixtures.some_calculation, args=(3, 1, 4), id='A', connection=self.connection)
job_B = Job.create(func=fixtures.some_calculation, args=(2, 7, 2), id='B', connection=self.connection)
# No dependencies
job = Job.create(func=fixtures.say_hello, connection=self.connection)
job.save()
Job.fetch(job.id, connection=self.connection)
self.assertEqual(job._dependency_ids, [])
# Various ways of specifying dependencies
cases = [
['A', ['A']],
[job_A, ['A']],
[['A', 'B'], ['A', 'B']],
[[job_A, job_B], ['A', 'B']],
[['A', job_B], ['A', 'B']],
[('A', 'B'), ['A', 'B']],
[(job_A, job_B), ['A', 'B']],
[(job_A, 'B'), ['A', 'B']],
[Dependency('A'), ['A']],
[Dependency(job_A), ['A']],
[Dependency(['A', 'B']), ['A', 'B']],
[Dependency([job_A, job_B]), ['A', 'B']],
[Dependency(['A', job_B]), ['A', 'B']],
[Dependency(('A', 'B')), ['A', 'B']],
[Dependency((job_A, job_B)), ['A', 'B']],
[Dependency((job_A, 'B')), ['A', 'B']],
]
for given, expected in cases:
job = Job.create(func=fixtures.say_hello, depends_on=given, connection=self.connection)
job.save()
Job.fetch(job.id, connection=self.connection)
self.assertEqual(job._dependency_ids, expected)
def test_cleanup_expires_dependency_keys(self):
dependency_job = Job.create(func=fixtures.say_hello, connection=self.connection)
dependency_job.save()
dependent_job = Job.create(func=fixtures.say_hello, depends_on=dependency_job, connection=self.connection)
dependent_job.register_dependency()
dependent_job.save()
dependent_job.cleanup(ttl=100)
dependency_job.cleanup(ttl=100)
self.assertEqual(self.connection.ttl(dependent_job.dependencies_key), 100)
self.assertEqual(self.connection.ttl(dependency_job.dependents_key), 100)
def test_job_with_dependents_delete_parent(self):
"""job.delete() deletes itself from Redis but not dependents.
Without a save, the dependent job is never saved into Redis. The delete
method will get and pass a NoSuchJobError.
"""
queue = Queue(connection=self.connection, serializer=JSONSerializer)
job = queue.enqueue(fixtures.say_hello)
job2 = Job.create(
func=fixtures.say_hello, depends_on=job, serializer=JSONSerializer, connection=self.connection
)
job2.register_dependency()
job.delete()
self.assertFalse(self.connection.exists(job.key))
self.assertFalse(self.connection.exists(job.dependents_key))
# By default, dependents are not deleted, but the job is in Redis only
# if it was saved.
self.assertFalse(self.connection.exists(job2.key))
self.assertNotIn(job.id, queue.get_job_ids())
def test_job_with_dependents_delete_parent_with_saved(self):
"""job.delete() deletes itself from Redis but not dependents. If the
dependent job was saved, it will remain in redis."""
queue = Queue(connection=self.connection, serializer=JSONSerializer)
job = queue.enqueue(fixtures.say_hello)
job2 = Job.create(
func=fixtures.say_hello, depends_on=job, serializer=JSONSerializer, connection=self.connection
)
job2.register_dependency()
job2.save()
job.delete()
self.assertFalse(self.connection.exists(job.key))
self.assertFalse(self.connection.exists(job.dependents_key))
# By default, dependents are not deleted, but the job is in Redis only
# if it was saved!
self.assertTrue(self.connection.exists(job2.key))
self.assertNotIn(job.id, queue.get_job_ids())
def test_job_with_dependents_deleteall(self):
"""job.delete() deletes itself from Redis. Dependents need to be
deleted explicitly."""
queue = Queue(connection=self.connection, serializer=JSONSerializer)
job = queue.enqueue(fixtures.say_hello)
job2 = Job.create(
func=fixtures.say_hello, depends_on=job, serializer=JSONSerializer, connection=self.connection
)
job2.register_dependency()
job.delete(delete_dependents=True)
self.assertFalse(self.connection.exists(job.key))
self.assertFalse(self.connection.exists(job.dependents_key))
self.assertFalse(self.connection.exists(job2.key))
self.assertNotIn(job.id, queue.get_job_ids())
def test_job_with_dependents_delete_all_with_saved(self):
"""job.delete() deletes itself from Redis. Dependents need to be
deleted explicitly. Without a save, the dependent job is never saved
into redis. The delete method will get and pass a NoSuchJobError.
"""
queue = Queue(connection=self.connection, serializer=JSONSerializer)
job = queue.enqueue(fixtures.say_hello)
job2 = Job.create(
func=fixtures.say_hello,
depends_on=job,
serializer=JSONSerializer,
connection=self.connection,
status=JobStatus.QUEUED,
)
job2.register_dependency()
job2.save()
job.delete(delete_dependents=True)
self.assertFalse(self.connection.exists(job.key))
self.assertFalse(self.connection.exists(job.dependents_key))
self.assertFalse(self.connection.exists(job2.key))
self.assertNotIn(job.id, queue.get_job_ids())
def test_dependent_job_creates_dependencies_key(self):
queue = Queue(connection=self.connection)
dependency_job = queue.enqueue(fixtures.say_hello)
dependent_job = Job.create(func=fixtures.say_hello, depends_on=dependency_job, connection=self.connection)
dependent_job.register_dependency()
dependent_job.save()
self.assertTrue(self.connection.exists(dependent_job.dependencies_key))
def test_dependent_job_deletes_dependencies_key(self):
"""
job.delete() deletes itself from Redis.
"""
queue = Queue(connection=self.connection, serializer=JSONSerializer)
dependency_job = queue.enqueue(fixtures.say_hello)
dependent_job = Job.create(
func=fixtures.say_hello,
depends_on=dependency_job,
serializer=JSONSerializer,
connection=self.connection,
status=JobStatus.QUEUED,
)
dependent_job.register_dependency()
dependent_job.save()
dependent_job.delete()
self.assertTrue(self.connection.exists(dependency_job.key))
self.assertFalse(self.connection.exists(dependent_job.dependencies_key))
self.assertFalse(self.connection.exists(dependent_job.key))
def test_create_and_cancel_job_enqueue_dependents(self):
"""Ensure job.cancel() works properly with enqueue_dependents=True"""
queue = Queue(connection=self.connection)
dependency = queue.enqueue(fixtures.say_hello)
dependent = queue.enqueue(fixtures.say_hello, depends_on=dependency)
self.assertEqual(1, len(queue.get_jobs()))
self.assertEqual(1, len(queue.deferred_job_registry))
cancel_job(dependency.id, enqueue_dependents=True, connection=self.connection)
self.assertEqual(1, len(queue.get_jobs()))
self.assertEqual(0, len(queue.deferred_job_registry))
registry = CanceledJobRegistry(connection=self.connection, queue=queue)
self.assertIn(dependency, registry)
self.assertEqual(dependency.get_status(), JobStatus.CANCELED)
self.assertIn(dependent, queue.get_jobs())
self.assertEqual(dependent.get_status(), JobStatus.QUEUED)
# If job is deleted, it's also removed from CanceledJobRegistry
dependency.delete()
self.assertNotIn(dependency, registry)
def test_create_and_cancel_job_enqueue_dependents_in_registry(self):
"""Ensure job.cancel() works properly with enqueue_dependents=True and when the job is in a registry"""
queue = Queue(connection=self.connection)
dependency = queue.enqueue(fixtures.raise_exc)
dependent = queue.enqueue(fixtures.say_hello, depends_on=dependency)
print('# Post enqueue', self.connection.smembers(dependency.dependents_key))
self.assertTrue(dependency.dependent_ids)
self.assertEqual(1, len(queue.get_jobs()))
self.assertEqual(1, len(queue.deferred_job_registry))
w = Worker([queue])
w.work(burst=True, max_jobs=1)
self.assertTrue(dependency.dependent_ids)
print('# Post work', self.connection.smembers(dependency.dependents_key))
dependency.refresh()
dependent.refresh()
self.assertEqual(0, len(queue.get_jobs()))
self.assertEqual(1, len(queue.deferred_job_registry))
self.assertEqual(1, len(queue.failed_job_registry))
print('# Pre cancel', self.connection.smembers(dependency.dependents_key))
cancel_job(dependency.id, enqueue_dependents=True, connection=self.connection)
dependency.refresh()
dependent.refresh()
print('# Post cancel', self.connection.smembers(dependency.dependents_key))
self.assertEqual(1, len(queue.get_jobs()))
self.assertEqual(0, len(queue.deferred_job_registry))
self.assertEqual(0, len(queue.failed_job_registry))
self.assertEqual(1, len(queue.canceled_job_registry))
registry = CanceledJobRegistry(connection=self.connection, queue=queue)
self.assertIn(dependency, registry)
self.assertEqual(dependency.get_status(), JobStatus.CANCELED)
self.assertNotIn(dependency, queue.failed_job_registry)
self.assertIn(dependent, queue.get_jobs())
self.assertEqual(dependent.get_status(), JobStatus.QUEUED)
# If job is deleted, it's also removed from CanceledJobRegistry
dependency.delete()
self.assertNotIn(dependency, registry)
def test_create_and_cancel_job_enqueue_dependents_with_pipeline(self):
"""Ensure job.cancel() works properly with enqueue_dependents=True"""
queue = Queue(connection=self.connection)
dependency = queue.enqueue(fixtures.say_hello)
dependent = queue.enqueue(fixtures.say_hello, depends_on=dependency)
self.assertEqual(1, len(queue.get_jobs()))
self.assertEqual(1, len(queue.deferred_job_registry))
self.connection.set('some:key', b'some:value')
with self.connection.pipeline() as pipe:
pipe.watch('some:key')
self.assertEqual(self.connection.get('some:key'), b'some:value')
dependency.cancel(pipeline=pipe, enqueue_dependents=True)
pipe.set('some:key', b'some:other:value')
pipe.execute()
self.assertEqual(self.connection.get('some:key'), b'some:other:value')
self.assertEqual(1, len(queue.get_jobs()))
self.assertEqual(0, len(queue.deferred_job_registry))
registry = CanceledJobRegistry(connection=self.connection, queue=queue)
self.assertIn(dependency, registry)
self.assertEqual(dependency.get_status(), JobStatus.CANCELED)
self.assertIn(dependent, queue.get_jobs())
self.assertEqual(dependent.get_status(), JobStatus.QUEUED)
# If job is deleted, it's also removed from CanceledJobRegistry
dependency.delete()
self.assertNotIn(dependency, registry)
def test_canceling_job_removes_it_from_dependency_dependents_key(self):
"""Cancel child jobs and verify their IDs are removed from the parent's dependents_key."""
connection = self.connection
queue = Queue(connection=connection)
parent_job = queue.enqueue(fixtures.say_hello, job_id='parent_job')
child_job_1 = queue.enqueue(fixtures.say_hello, depends_on=parent_job, job_id='child_job_1')
child_job_2 = queue.enqueue(fixtures.say_hello, depends_on=parent_job, job_id='child_job_2')
self.assertEqual(set(parent_job.dependent_ids), {child_job_1.id, child_job_2.id})
child_job_1.cancel(remove_from_dependencies=True)
self.assertEqual(set(parent_job.dependent_ids), {child_job_2.id})
# child_job_2 still in dependents_key since remove_from_dependencies = False
child_job_2.cancel(remove_from_dependencies=False)
self.assertEqual(set(parent_job.dependent_ids), {child_job_2.id})
def test_dependents_key_for_should_return_prefixed_job_id(self):
"""test redis key to store job dependents hash under"""
job_id = 'random'
key = Job.dependents_key_for(job_id=job_id)
assert key == Job.redis_job_namespace_prefix + job_id + ':dependents'
def test_dependencies_key_should_have_prefixed_job_id(self):
job_id = 'random'
job = Job(id=job_id, connection=self.connection)
expected_key = Job.redis_job_namespace_prefix + ':' + job_id + ':dependencies'
assert job.dependencies_key == expected_key
def test_fetch_dependencies_returns_dependency_jobs(self):
queue = Queue(connection=self.connection)
dependency_job = queue.enqueue(fixtures.say_hello)
dependent_job = Job.create(func=fixtures.say_hello, depends_on=dependency_job, connection=self.connection)
dependent_job.register_dependency()
dependent_job.save()
dependencies = dependent_job.fetch_dependencies(pipeline=self.connection)
self.assertListEqual(dependencies, [dependency_job])
def test_fetch_dependencies_returns_empty_if_not_dependent_job(self):
dependent_job = Job.create(func=fixtures.say_hello, connection=self.connection)
dependent_job.register_dependency()
dependent_job.save()
dependencies = dependent_job.fetch_dependencies(pipeline=self.connection)
self.assertListEqual(dependencies, [])
def test_fetch_dependencies_raises_if_dependency_deleted(self):
queue = Queue(connection=self.connection)
dependency_job = queue.enqueue(fixtures.say_hello)
dependent_job = Job.create(func=fixtures.say_hello, depends_on=dependency_job, connection=self.connection)
dependent_job.register_dependency()
dependent_job.save()
dependency_job.delete()
self.assertNotIn(
dependent_job.id, [job.id for job in dependent_job.fetch_dependencies(pipeline=self.connection)]
)
def test_fetch_dependencies_watches(self):
queue = Queue(connection=self.connection)
dependency_job = queue.enqueue(fixtures.say_hello)
dependent_job = Job.create(func=fixtures.say_hello, depends_on=dependency_job, connection=self.connection)
dependent_job.register_dependency()
dependent_job.save()
with self.connection.pipeline() as pipeline:
dependent_job.fetch_dependencies(watch=True, pipeline=pipeline)
pipeline.multi()
with self.assertRaises(WatchError):
self.connection.set(Job.key_for(dependency_job.id), 'somethingelsehappened')
pipeline.touch(dependency_job.id)
pipeline.execute()
def test_dependencies_finished_returns_false_if_dependencies_queued(self):
queue = Queue(connection=self.connection)
dependency_job_ids = [queue.enqueue(fixtures.say_hello).id for _ in range(5)]
dependent_job = Job.create(func=fixtures.say_hello, connection=self.connection)
dependent_job._dependency_ids = dependency_job_ids
dependent_job.register_dependency()
dependencies_finished = dependent_job.dependencies_are_met()
self.assertFalse(dependencies_finished)
def test_dependencies_finished_returns_true_if_no_dependencies(self):
dependent_job = Job.create(func=fixtures.say_hello, connection=self.connection)
dependent_job.register_dependency()
dependencies_finished = dependent_job.dependencies_are_met()
self.assertTrue(dependencies_finished)
def test_dependencies_finished_returns_true_if_all_dependencies_finished(self):
dependency_jobs = [Job.create(fixtures.say_hello, connection=self.connection) for _ in range(5)]
dependent_job = Job.create(func=fixtures.say_hello, connection=self.connection)
dependent_job._dependency_ids = [job.id for job in dependency_jobs]
dependent_job.register_dependency()
right_now = now()
# Set ended_at timestamps
for i, job in enumerate(dependency_jobs):
job._status = JobStatus.FINISHED
job.ended_at = right_now - timedelta(seconds=i)
job.save()
dependencies_finished = dependent_job.dependencies_are_met()
self.assertTrue(dependencies_finished)
def test_dependencies_finished_returns_false_if_unfinished_job(self):
dependency_jobs = [Job.create(fixtures.say_hello, connection=self.connection) for _ in range(2)]
dependency_jobs[0]._status = JobStatus.FINISHED
dependency_jobs[0].ended_at = now()
dependency_jobs[0].save()
dependency_jobs[1]._status = JobStatus.STARTED
dependency_jobs[1].ended_at = None
dependency_jobs[1].save()
dependent_job = Job.create(func=fixtures.say_hello, connection=self.connection)
dependent_job._dependency_ids = [job.id for job in dependency_jobs]
dependent_job.register_dependency()
dependencies_finished = dependent_job.dependencies_are_met()
self.assertFalse(dependencies_finished)
def test_dependencies_finished_watches_job(self):
queue = Queue(connection=self.connection)
dependency_job = queue.enqueue(fixtures.say_hello)
dependent_job = Job.create(func=fixtures.say_hello, connection=self.connection)
dependent_job._dependency_ids = [dependency_job.id]
dependent_job.register_dependency()
with self.connection.pipeline() as pipeline:
dependent_job.dependencies_are_met(
pipeline=pipeline,
)
dependency_job.set_status(JobStatus.FAILED, pipeline=self.connection)
pipeline.multi()
with self.assertRaises(WatchError):
pipeline.touch(Job.key_for(dependent_job.id))
pipeline.execute()
def test_execution_order_with_sole_dependency(self):
queue = Queue(connection=self.connection)
key = 'test_job:job_order'
connection_kwargs = self.connection.connection_pool.connection_kwargs
# When there are no dependencies, the two fast jobs ("A" and "B") run in the order enqueued.
# Worker 1 will be busy with the slow job, so worker 2 will complete both fast jobs.
job_slow = queue.enqueue(fixtures.rpush, args=[key, 'slow', connection_kwargs, True, 0.5], job_id='slow_job')
job_A = queue.enqueue(fixtures.rpush, args=[key, 'A', connection_kwargs, True])
job_B = queue.enqueue(fixtures.rpush, args=[key, 'B', connection_kwargs, True])
fixtures.burst_two_workers(queue, connection=self.connection)
time.sleep(0.75)
jobs_completed = [v.decode() for v in self.connection.lrange(key, 0, 2)]
self.assertEqual(queue.count, 0)
self.assertTrue(all(job.is_finished for job in [job_slow, job_A, job_B]))
self.assertEqual(jobs_completed, ['A:w2', 'B:w2', 'slow:w1'])
self.connection.delete(key)
# When job "A" depends on the slow job, then job "B" finishes before "A".
# There is no clear requirement on which worker should take job "A", so we stay silent on that.
job_slow = queue.enqueue(fixtures.rpush, args=[key, 'slow', connection_kwargs, True, 0.5], job_id='slow_job')
job_A = queue.enqueue(fixtures.rpush, args=[key, 'A', connection_kwargs, False], depends_on='slow_job')
job_B = queue.enqueue(fixtures.rpush, args=[key, 'B', connection_kwargs, True])
fixtures.burst_two_workers(queue, connection=self.connection)
time.sleep(0.75)
jobs_completed = [v.decode() for v in self.connection.lrange(key, 0, 2)]
self.assertEqual(queue.count, 0)
self.assertTrue(all(job.is_finished for job in [job_slow, job_A, job_B]))
self.assertEqual(jobs_completed, ['B:w2', 'slow:w1', 'A'])
def test_execution_order_with_dual_dependency(self):
"""Test that jobs with dependencies are executed in the correct order."""
queue = Queue(connection=self.connection)
key = 'test_job:job_order'
connection_kwargs = self.connection.connection_pool.connection_kwargs
# When there are no dependencies, the two fast jobs ("A" and "B") run in the order enqueued.
job_slow_1 = queue.enqueue(fixtures.rpush, args=[key, 'slow_1', connection_kwargs, True, 0.5], job_id='slow_1')
job_slow_2 = queue.enqueue(fixtures.rpush, args=[key, 'slow_2', connection_kwargs, True, 0.75], job_id='slow_2')
job_A = queue.enqueue(fixtures.rpush, args=[key, 'A', connection_kwargs, True])
job_B = queue.enqueue(fixtures.rpush, args=[key, 'B', connection_kwargs, True])
fixtures.burst_two_workers(queue, connection=self.connection)
time.sleep(1)
jobs_completed = [v.decode() for v in self.connection.lrange(key, 0, 3)]
self.assertEqual(queue.count, 0)
self.assertTrue(all(job.is_finished for job in [job_slow_1, job_slow_2, job_A, job_B]))
self.assertEqual(jobs_completed, ['slow_1:w1', 'A:w1', 'B:w1', 'slow_2:w2'])
self.connection.delete(key)
# This time job "A" depends on two slow jobs, while job "B" depends only on the faster of
# the two. Job "B" should be completed before job "A".
# There is no clear requirement on which worker should take job "A", so we stay silent on that.
job_slow_1 = queue.enqueue(fixtures.rpush, args=[key, 'slow_1', connection_kwargs, True, 0.5], job_id='slow_1')
job_slow_2 = queue.enqueue(fixtures.rpush, args=[key, 'slow_2', connection_kwargs, True, 0.75], job_id='slow_2')
job_A = queue.enqueue(
fixtures.rpush, args=[key, 'A', connection_kwargs, False], depends_on=['slow_1', 'slow_2']
)
job_B = queue.enqueue(fixtures.rpush, args=[key, 'B', connection_kwargs, True], depends_on=['slow_1'])
fixtures.burst_two_workers(queue, connection=self.connection)
time.sleep(1)
jobs_completed = [v.decode() for v in self.connection.lrange(key, 0, 3)]
self.assertEqual(queue.count, 0)
self.assertTrue(all(job.is_finished for job in [job_slow_1, job_slow_2, job_A, job_B]))
self.assertEqual(jobs_completed, ['slow_1:w1', 'B:w1', 'slow_2:w2', 'A'])
|
TestJobDependency
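The tests above exercise rq's job-dependency API end to end. As a minimal sketch of the pattern they rely on (a running local Redis server is assumed, and the dotted task paths are placeholders for real importable functions):

from redis import Redis
from rq import Queue
from rq.job import Dependency

queue = Queue(connection=Redis())

# Parent job: children stay deferred until it finishes.
parent = queue.enqueue("tasks.extract", "s3://bucket/key")  # placeholder task path

# depends_on accepts a job, a job id, an iterable of either, or a Dependency wrapper.
child = queue.enqueue("tasks.transform", depends_on=Dependency(jobs=[parent]))

Until `parent` finishes, `child` sits in the deferred job registry, which is what the assertions on `queue.deferred_job_registry` above are checking.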
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/inference.py
|
{
"start": 568,
"end": 3322
}
|
class ____(NamedTuple):
"""The information about an input that can be inferred from the function signature."""
annotation: Any
description: Optional[str]
def _infer_input_description_from_docstring(fn: Callable[..., Any]) -> Mapping[str, Optional[str]]:
doc_str = fn.__doc__
if doc_str is None:
return {}
try:
docstring = parse(doc_str)
return {p.arg_name: p.description for p in docstring.params}
except Exception:
return {}
def _infer_output_description_from_docstring(fn: Callable[..., Any]) -> Optional[str]:
doc_str = fn.__doc__
if doc_str is None:
return None
try:
docstring = parse(doc_str)
if docstring.returns is None:
return None
return docstring.returns.description
except Exception:
return None
def infer_output_props(fn: Callable[..., Any]) -> InferredOutputProps:
type_hints = get_type_hints(fn)
annotation = (
type_hints["return"]
if not isgeneratorfunction(fn) and "return" in type_hints
else Parameter.empty
)
return InferredOutputProps(
annotation=annotation,
description=_infer_output_description_from_docstring(fn),
)
def has_explicit_return_type(fn: Callable[..., Any]) -> bool:
sig = signature(fn)
return sig.return_annotation is not Signature.empty
def _infer_inputs_from_params(
params: Sequence[Parameter],
type_hints: Mapping[str, object],
descriptions: Optional[Mapping[str, Optional[str]]] = None,
) -> Sequence[InferredInputProps]:
_descriptions: Mapping[str, Optional[str]] = descriptions or {}
input_defs = []
for param in params:
if param.default is not Parameter.empty:
input_def = InferredInputProps(
param.name,
type_hints.get(param.name, param.annotation),
default_value=param.default,
description=_descriptions.get(param.name),
)
else:
input_def = InferredInputProps(
param.name,
type_hints.get(param.name, param.annotation),
description=_descriptions.get(param.name),
)
input_defs.append(input_def)
return input_defs
def infer_input_props(
fn: Callable[..., Any], context_arg_provided: bool
) -> Sequence[InferredInputProps]:
sig = signature(fn)
params = list(sig.parameters.values())
type_hints = get_type_hints(fn)
descriptions = _infer_input_description_from_docstring(fn)
params_to_infer = params[1:] if context_arg_provided else params
defs = _infer_inputs_from_params(params_to_infer, type_hints, descriptions=descriptions)
return defs
|
InferredOutputProps
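The helpers above derive input/output metadata from a function's signature, type hints, and docstring. A self-contained sketch of the same signature-inspection technique using only the standard library (deliberately not importing dagster's private module; the real code also parses parameter descriptions out of the docstring, which this sketch omits):

from inspect import Parameter, signature
from typing import get_type_hints


def describe_params(fn):
    """Mirror the inference above: (name, annotation, default) per parameter."""
    hints = get_type_hints(fn)
    described = []
    for param in signature(fn).parameters.values():
        default = None if param.default is Parameter.empty else param.default
        described.append((param.name, hints.get(param.name, param.annotation), default))
    return described


def scale(x: float, factor: float = 2.0) -> float:
    """Scale a value by a multiplier."""
    return x * factor


print(describe_params(scale))
# [('x', <class 'float'>, None), ('factor', <class 'float'>, 2.0)]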
|
python
|
spack__spack
|
lib/spack/spack/vendor/ruamel/yaml/scalarfloat.py
|
{
"start": 3694,
"end": 3913
}
|
class ____(ScalarFloat):
def __new__(cls, value, width=None, underscore=None):
# type: (Any, Any, Any) -> Any
return ScalarFloat.__new__(cls, value, width=width, underscore=underscore)
|
ExponentialFloat
|
python
|
huggingface__transformers
|
src/transformers/models/data2vec/modeling_data2vec_audio.py
|
{
"start": 5067,
"end": 6003
}
|
class ____(nn.Module):
"""Construct the features from raw audio waveform"""
def __init__(self, config):
super().__init__()
self.conv_layers = nn.ModuleList(
[Data2VecAudioConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)]
)
self.gradient_checkpointing = False
self._requires_grad = True
def _freeze_parameters(self):
for param in self.parameters():
param.requires_grad = False
self._requires_grad = False
def forward(self, input_values):
hidden_states = input_values[:, None]
# make sure hidden_states require grad for gradient_checkpointing
if self._requires_grad and self.training:
hidden_states.requires_grad = True
for conv_layer in self.conv_layers:
hidden_states = conv_layer(hidden_states)
return hidden_states
|
Data2VecAudioFeatureEncoder
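The encoder simply stacks the configured conv layers over a raw waveform and returns frame-level features. A rough sketch of running it standalone (assumes torch and transformers are installed; the import path mirrors the file path above but is internal rather than public API, and the 16000-sample input is an arbitrary one-second placeholder):

import torch
from transformers import Data2VecAudioConfig
from transformers.models.data2vec.modeling_data2vec_audio import Data2VecAudioFeatureEncoder

config = Data2VecAudioConfig()          # default conv stack
encoder = Data2VecAudioFeatureEncoder(config).eval()

waveform = torch.randn(1, 16000)        # (batch, samples), e.g. 1 s at 16 kHz
with torch.no_grad():
    features = encoder(waveform)
print(features.shape)                   # (batch, conv_dim[-1], num_frames)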
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pyupgrade/UP008.py
|
{
"start": 5088,
"end": 5207
}
|
class ____(ParentI):
def f(self):
builtins.super(ChildI1, self).f() # no __class__ in the local scope
|
ChildI1
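This fixture exercises ruff's UP008 rule (super call with explicit arguments); the inline comment marks why the `builtins.super` form is left alone. For reference, the zero-argument form the rule otherwise rewrites to:

class Parent:
    def f(self):
        return "parent"


class Child(Parent):
    def f(self):
        return super().f()  # equivalent to super(Child, self).f()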
|
python
|
pypa__pip
|
src/pip/_vendor/rich/progress_bar.py
|
{
"start": 458,
"end": 8154
}
|
class ____(JupyterMixin):
"""Renders a (progress) bar. Used by rich.progress.
Args:
total (float, optional): Number of steps in the bar. Defaults to 100. Set to None to render a pulsing animation.
completed (float, optional): Number of steps completed. Defaults to 0.
width (int, optional): Width of the bar, or ``None`` for maximum width. Defaults to None.
pulse (bool, optional): Enable pulse effect. Defaults to False. Will pulse if a None total was passed.
style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished".
pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
animation_time (Optional[float], optional): Time in seconds to use for animation, or None to use system time.
"""
def __init__(
self,
total: Optional[float] = 100.0,
completed: float = 0,
width: Optional[int] = None,
pulse: bool = False,
style: StyleType = "bar.back",
complete_style: StyleType = "bar.complete",
finished_style: StyleType = "bar.finished",
pulse_style: StyleType = "bar.pulse",
animation_time: Optional[float] = None,
):
self.total = total
self.completed = completed
self.width = width
self.pulse = pulse
self.style = style
self.complete_style = complete_style
self.finished_style = finished_style
self.pulse_style = pulse_style
self.animation_time = animation_time
self._pulse_segments: Optional[List[Segment]] = None
def __repr__(self) -> str:
return f"<Bar {self.completed!r} of {self.total!r}>"
@property
def percentage_completed(self) -> Optional[float]:
"""Calculate percentage complete."""
if self.total is None:
return None
completed = (self.completed / self.total) * 100.0
completed = min(100, max(0.0, completed))
return completed
@lru_cache(maxsize=16)
def _get_pulse_segments(
self,
fore_style: Style,
back_style: Style,
color_system: str,
no_color: bool,
ascii: bool = False,
) -> List[Segment]:
"""Get a list of segments to render a pulse animation.
Returns:
List[Segment]: A list of segments, one segment per character.
"""
bar = "-" if ascii else "β"
segments: List[Segment] = []
if color_system not in ("standard", "eight_bit", "truecolor") or no_color:
segments += [Segment(bar, fore_style)] * (PULSE_SIZE // 2)
segments += [Segment(" " if no_color else bar, back_style)] * (
PULSE_SIZE - (PULSE_SIZE // 2)
)
return segments
append = segments.append
fore_color = (
fore_style.color.get_truecolor()
if fore_style.color
else ColorTriplet(255, 0, 255)
)
back_color = (
back_style.color.get_truecolor()
if back_style.color
else ColorTriplet(0, 0, 0)
)
cos = math.cos
pi = math.pi
_Segment = Segment
_Style = Style
from_triplet = Color.from_triplet
for index in range(PULSE_SIZE):
position = index / PULSE_SIZE
fade = 0.5 + cos(position * pi * 2) / 2.0
color = blend_rgb(fore_color, back_color, cross_fade=fade)
append(_Segment(bar, _Style(color=from_triplet(color))))
return segments
def update(self, completed: float, total: Optional[float] = None) -> None:
"""Update progress with new values.
Args:
completed (float): Number of steps completed.
total (float, optional): Total number of steps, or ``None`` to not change. Defaults to None.
"""
self.completed = completed
self.total = total if total is not None else self.total
def _render_pulse(
self, console: Console, width: int, ascii: bool = False
) -> Iterable[Segment]:
"""Renders the pulse animation.
Args:
console (Console): Console instance.
width (int): Width in characters of pulse animation.
Returns:
RenderResult: [description]
Yields:
Iterator[Segment]: Segments to render pulse
"""
fore_style = console.get_style(self.pulse_style, default="white")
back_style = console.get_style(self.style, default="black")
pulse_segments = self._get_pulse_segments(
fore_style, back_style, console.color_system, console.no_color, ascii=ascii
)
segment_count = len(pulse_segments)
current_time = (
monotonic() if self.animation_time is None else self.animation_time
)
segments = pulse_segments * (int(width / segment_count) + 2)
offset = int(-current_time * 15) % segment_count
segments = segments[offset : offset + width]
yield from segments
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
width = min(self.width or options.max_width, options.max_width)
ascii = options.legacy_windows or options.ascii_only
should_pulse = self.pulse or self.total is None
if should_pulse:
yield from self._render_pulse(console, width, ascii=ascii)
return
completed: Optional[float] = (
min(self.total, max(0, self.completed)) if self.total is not None else None
)
bar = "-" if ascii else "β"
half_bar_right = " " if ascii else "βΈ"
half_bar_left = " " if ascii else "βΊ"
complete_halves = (
int(width * 2 * completed / self.total)
if self.total and completed is not None
else width * 2
)
bar_count = complete_halves // 2
half_bar_count = complete_halves % 2
style = console.get_style(self.style)
is_finished = self.total is None or self.completed >= self.total
complete_style = console.get_style(
self.finished_style if is_finished else self.complete_style
)
_Segment = Segment
if bar_count:
yield _Segment(bar * bar_count, complete_style)
if half_bar_count:
yield _Segment(half_bar_right * half_bar_count, complete_style)
if not console.no_color:
remaining_bars = width - bar_count - half_bar_count
if remaining_bars and console.color_system is not None:
if not half_bar_count and bar_count:
yield _Segment(half_bar_left, style)
remaining_bars -= 1
if remaining_bars:
yield _Segment(bar * remaining_bars, style)
def __rich_measure__(
self, console: Console, options: ConsoleOptions
) -> Measurement:
return (
Measurement(self.width, self.width)
if self.width is not None
else Measurement(4, options.max_width)
)
if __name__ == "__main__": # pragma: no cover
console = Console()
bar = ProgressBar(width=50, total=100)
import time
console.show_cursor(False)
for n in range(0, 101, 1):
bar.update(n)
console.print(bar)
console.file.write("\r")
time.sleep(0.05)
console.show_cursor(True)
console.print()
|
ProgressBar
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/operators/batch.py
|
{
"start": 2112,
"end": 17276
}
|
class ____(AwsBaseOperator[BatchClientHook]):
"""
Execute a job on AWS Batch.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BatchOperator`
:param job_name: the name for the job that will run on AWS Batch (templated)
:param job_definition: the job definition name on AWS Batch
:param job_queue: the queue name on AWS Batch
:param container_overrides: the `containerOverrides` parameter for boto3 (templated)
:param ecs_properties_override: the `ecsPropertiesOverride` parameter for boto3 (templated)
:param eks_properties_override: the `eksPropertiesOverride` parameter for boto3 (templated)
:param node_overrides: the `nodeOverrides` parameter for boto3 (templated)
:param share_identifier: The share identifier for the job. Don't specify this parameter if the job queue
doesn't have a scheduling policy.
:param scheduling_priority_override: The scheduling priority for the job.
Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority.
This overrides any scheduling priority in the job definition
:param array_properties: the `arrayProperties` parameter for boto3
:param parameters: the `parameters` for boto3 (templated)
:param job_id: the job ID, usually unknown (None) until the
submit_job operation gets the jobId defined by AWS Batch
:param waiters: an :py:class:`.BatchWaiters` object (see note below);
if None, polling is used with max_retries and status_retries.
:param max_retries: exponential back-off retries, 4200 = 48 hours;
polling is only used when waiters is None
:param status_retries: number of HTTP retries to get job status, 10;
polling is only used when waiters is None
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param tags: collection of tags to apply to the AWS Batch job submission
if None, no tags are submitted
:param deferrable: Run operator in the deferrable mode.
:param awslogs_enabled: Specifies whether logs from CloudWatch
should be printed or not, False.
If it is an array job, only the logs of the first task will be printed.
:param awslogs_fetch_interval: The interval with which cloudwatch logs are to be fetched, 30 sec.
:param poll_interval: (Deferrable mode only) Time in seconds to wait between polling.
:param submit_job_timeout: Execution timeout in seconds for submitted batch job.
.. note::
Any custom waiters must return a waiter for these calls:
.. code-block:: python
waiter = waiters.get_waiter("JobExists")
waiter = waiters.get_waiter("JobRunning")
waiter = waiters.get_waiter("JobComplete")
"""
aws_hook_class = BatchClientHook
ui_color = "#c3dae0"
arn: str | None = None
template_fields: Sequence[str] = aws_template_fields(
"job_id",
"job_name",
"job_definition",
"job_queue",
"container_overrides",
"array_properties",
"ecs_properties_override",
"eks_properties_override",
"node_overrides",
"parameters",
"retry_strategy",
"waiters",
"tags",
"wait_for_completion",
"awslogs_enabled",
"awslogs_fetch_interval",
)
template_fields_renderers = {
"container_overrides": "json",
"parameters": "json",
"ecs_properties_override": "json",
"eks_properties_override": "json",
"node_overrides": "json",
"retry_strategy": "json",
}
operator_extra_links = (
BatchJobDetailsLink(),
BatchJobDefinitionLink(),
BatchJobQueueLink(),
CloudWatchEventsLink(),
)
def __init__(
self,
*,
job_name: str,
job_definition: str,
job_queue: str,
container_overrides: dict | None = None,
array_properties: dict | None = None,
ecs_properties_override: dict | None = None,
eks_properties_override: dict | None = None,
node_overrides: dict | None = None,
share_identifier: str | None = None,
scheduling_priority_override: int | None = None,
parameters: dict | None = None,
retry_strategy: dict | None = None,
job_id: str | None = None,
waiters: Any | None = None,
max_retries: int = 4200,
status_retries: int | None = None,
tags: dict | None = None,
wait_for_completion: bool = True,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poll_interval: int = 30,
awslogs_enabled: bool = False,
awslogs_fetch_interval: timedelta = timedelta(seconds=30),
submit_job_timeout: int | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.job_id = job_id
self.job_name = job_name
self.job_definition = job_definition
self.job_queue = job_queue
self.container_overrides = container_overrides
self.ecs_properties_override = ecs_properties_override
self.eks_properties_override = eks_properties_override
self.node_overrides = node_overrides
self.share_identifier = share_identifier
self.scheduling_priority_override = scheduling_priority_override
self.array_properties = array_properties
self.parameters = parameters or {}
self.retry_strategy = retry_strategy
self.waiters = waiters
self.tags = tags or {}
self.wait_for_completion = wait_for_completion
self.deferrable = deferrable
self.poll_interval = poll_interval
self.awslogs_enabled = awslogs_enabled
self.awslogs_fetch_interval = awslogs_fetch_interval
self.submit_job_timeout = submit_job_timeout
# params for hook
self.max_retries = max_retries
self.status_retries = status_retries
@property
def _hook_parameters(self):
return {
**super()._hook_parameters,
"max_retries": self.max_retries,
"status_retries": self.status_retries,
}
def execute(self, context: Context) -> str | None:
"""
Submit and monitor an AWS Batch job.
:raises: AirflowException
"""
self.submit_job(context)
if self.deferrable:
if not self.job_id:
raise AirflowException("AWS Batch job - job_id was not found")
job = self.hook.get_job_description(self.job_id)
job_status = job.get("status")
if job_status == self.hook.SUCCESS_STATE:
self.log.info("Job completed.")
return self.job_id
if job_status == self.hook.FAILURE_STATE:
raise AirflowException(f"Error while running job: {self.job_id} is in {job_status} state")
if job_status in self.hook.INTERMEDIATE_STATES:
self.defer(
timeout=self.execution_timeout,
trigger=BatchJobTrigger(
job_id=self.job_id,
waiter_max_attempts=self.max_retries,
aws_conn_id=self.aws_conn_id,
region_name=self.region_name,
waiter_delay=self.poll_interval,
),
method_name="execute_complete",
)
raise AirflowException(f"Unexpected status: {job_status}")
if self.wait_for_completion:
self.monitor_job(context)
return self.job_id
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> str:
validated_event = validate_execute_complete_event(event)
if validated_event["status"] != "success":
raise AirflowException(f"Error while running job: {validated_event}")
self.job_id = validated_event["job_id"]
# Fetch logs if awslogs_enabled
if self.awslogs_enabled:
self.monitor_job(context) # fetch logs, no need to return
self.log.info("Job completed successfully for job_id: %s", self.job_id)
return self.job_id
def on_kill(self):
response = self.hook.client.terminate_job(jobId=self.job_id, reason="Task killed by the user")
self.log.info("AWS Batch job (%s) terminated: %s", self.job_id, response)
def submit_job(self, context: Context):
"""
Submit an AWS Batch job.
:raises: AirflowException
"""
self.log.info(
"Running AWS Batch job - job definition: %s - on queue %s",
self.job_definition,
self.job_queue,
)
if self.container_overrides:
self.log.info("AWS Batch job - container overrides: %s", self.container_overrides)
if self.array_properties:
self.log.info("AWS Batch job - array properties: %s", self.array_properties)
if self.ecs_properties_override:
self.log.info("AWS Batch job - ECS properties: %s", self.ecs_properties_override)
if self.eks_properties_override:
self.log.info("AWS Batch job - EKS properties: %s", self.eks_properties_override)
if self.node_overrides:
self.log.info("AWS Batch job - node properties: %s", self.node_overrides)
args = {
"jobName": self.job_name,
"jobQueue": self.job_queue,
"jobDefinition": self.job_definition,
"arrayProperties": self.array_properties,
"parameters": self.parameters,
"tags": self.tags,
"containerOverrides": self.container_overrides,
"ecsPropertiesOverride": self.ecs_properties_override,
"eksPropertiesOverride": self.eks_properties_override,
"nodeOverrides": self.node_overrides,
"retryStrategy": self.retry_strategy,
"shareIdentifier": self.share_identifier,
"schedulingPriorityOverride": self.scheduling_priority_override,
}
if self.submit_job_timeout:
args["timeout"] = {"attemptDurationSeconds": self.submit_job_timeout}
try:
response = self.hook.client.submit_job(**trim_none_values(args))
except Exception as e:
self.log.error(
"AWS Batch job failed submission - job definition: %s - on queue %s",
self.job_definition,
self.job_queue,
)
raise AirflowException(e)
self.job_id = response["jobId"]
self.log.info("AWS Batch job (%s) started: %s", self.job_id, response)
BatchJobDetailsLink.persist(
context=context,
operator=self,
region_name=self.hook.conn_region_name,
aws_partition=self.hook.conn_partition,
job_id=self.job_id,
)
def monitor_job(self, context: Context):
"""
Monitor an AWS Batch job.
This can raise an exception or an AirflowTaskTimeout if the task was
created with ``execution_timeout``.
"""
if not self.job_id:
raise AirflowException("AWS Batch job - job_id was not found")
try:
job_desc = self.hook.get_job_description(self.job_id)
job_definition_arn = job_desc["jobDefinition"]
job_queue_arn = job_desc["jobQueue"]
self.log.info(
"AWS Batch job (%s) Job Definition ARN: %r, Job Queue ARN: %r",
self.job_id,
job_definition_arn,
job_queue_arn,
)
except KeyError:
self.log.warning("AWS Batch job (%s) can't get Job Definition ARN and Job Queue ARN", self.job_id)
else:
BatchJobDefinitionLink.persist(
context=context,
operator=self,
region_name=self.hook.conn_region_name,
aws_partition=self.hook.conn_partition,
job_definition_arn=job_definition_arn,
)
BatchJobQueueLink.persist(
context=context,
operator=self,
region_name=self.hook.conn_region_name,
aws_partition=self.hook.conn_partition,
job_queue_arn=job_queue_arn,
)
if self.awslogs_enabled:
if self.waiters:
self.waiters.wait_for_job(self.job_id, get_batch_log_fetcher=self._get_batch_log_fetcher)
else:
self.hook.wait_for_job(self.job_id, get_batch_log_fetcher=self._get_batch_log_fetcher)
else:
if self.waiters:
self.waiters.wait_for_job(self.job_id)
else:
self.hook.wait_for_job(self.job_id)
awslogs = []
try:
awslogs = self.hook.get_job_all_awslogs_info(self.job_id)
except AirflowException as ae:
self.log.warning("Cannot determine where to find the AWS logs for this Batch job: %s", ae)
if awslogs:
self.log.info("AWS Batch job (%s) CloudWatch Events details found. Links to logs:", self.job_id)
link_builder = CloudWatchEventsLink()
for log in awslogs:
self.log.info(link_builder.format_link(**log))
if len(awslogs) > 1:
# there can be several log streams on multi-node jobs
self.log.warning(
"out of all those logs, we can only link to one in the UI. Using the first one."
)
CloudWatchEventsLink.persist(
context=context,
operator=self,
region_name=self.hook.conn_region_name,
aws_partition=self.hook.conn_partition,
**awslogs[0],
)
self.hook.check_job_success(self.job_id)
self.log.info("AWS Batch job (%s) succeeded", self.job_id)
def _get_batch_log_fetcher(self, job_id: str) -> AwsTaskLogFetcher | None:
awslog_info = self.hook.get_job_awslogs_info(job_id)
if not awslog_info:
return None
return AwsTaskLogFetcher(
aws_conn_id=self.aws_conn_id,
region_name=awslog_info["awslogs_region"],
log_group=awslog_info["awslogs_group"],
log_stream_name=awslog_info["awslogs_stream_name"],
fetch_interval=self.awslogs_fetch_interval,
logger=self.log,
)
|
BatchOperator
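A minimal DAG sketch wiring up the operator with the parameters documented above (all job names are placeholders; assumes a recent Airflow 2.x with the Amazon provider installed and AWS credentials configured on the default connection):

from datetime import datetime, timedelta

from airflow import DAG
from airflow.providers.amazon.aws.operators.batch import BatchOperator

with DAG(dag_id="aws_batch_example", start_date=datetime(2024, 1, 1), schedule=None) as dag:
    submit_job = BatchOperator(
        task_id="submit_batch_job",
        job_name="example-job",            # placeholder
        job_definition="example-job-def",  # placeholder
        job_queue="example-queue",         # placeholder
        container_overrides={"command": ["echo", "hello"]},
        awslogs_enabled=True,
        awslogs_fetch_interval=timedelta(seconds=30),
        wait_for_completion=True,
    )

Setting `deferrable=True` instead hands the polling off to the `BatchJobTrigger` path shown in `execute()`.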
|
python
|
conda__conda
|
conda/exceptions.py
|
{
"start": 14627,
"end": 14730
}
|
class ____(CondaError):
def __init__(self, message: str):
super().__init__(message)
|
LinkError
|
python
|
pandas-dev__pandas
|
pandas/tests/indexes/datetimelike_/test_equals.py
|
{
"start": 374,
"end": 1286
}
|
class ____:
def test_not_equals_numeric(self, index):
assert not index.equals(Index(index.asi8))
assert not index.equals(Index(index.asi8.astype("u8")))
assert not index.equals(Index(index.asi8).astype("f8"))
def test_equals(self, index):
assert index.equals(index)
assert index.equals(index.astype(object))
assert index.equals(CategoricalIndex(index))
assert index.equals(CategoricalIndex(index.astype(object)))
def test_not_equals_non_arraylike(self, index):
assert not index.equals(list(index))
def test_not_equals_strings(self, index):
other = Index([str(x) for x in index], dtype=object)
assert not index.equals(other)
assert not index.equals(CategoricalIndex(other))
def test_not_equals_misc_strs(self, index):
other = Index(list("abc"))
assert not index.equals(other)
|
EqualsTests
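Fixture machinery aside, the behaviour under test reduces to: a datetime-like index equals itself and object/categorical casts of itself, but not its integer (`asi8`) form or stringified values. A quick illustration, with a plain `DatetimeIndex` standing in for the parametrized fixture:

import pandas as pd

idx = pd.date_range("2024-01-01", periods=3, freq="D")

print(idx.equals(idx.astype(object)))        # True: same values, object dtype
print(idx.equals(pd.CategoricalIndex(idx)))  # True: categorical cast preserves values
print(idx.equals(pd.Index(idx.asi8)))        # False: raw int64 epoch values
print(idx.equals(pd.Index([str(x) for x in idx], dtype=object)))  # False: strings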
|
python
|
django-import-export__django-import-export
|
tests/core/tests/test_results.py
|
{
"start": 1574,
"end": 5978
}
|
class ____(SimpleTestCase):
def setUp(self):
self.result = Result()
headers = ["id", "book_name"]
rows = [(1, "Some book")]
self.dataset = Dataset(*rows, headers=headers)
def test_add_dataset_headers(self):
target = ["some_header", "Error"]
self.result.add_dataset_headers(["some_header"])
self.assertEqual(target, self.result.failed_dataset.headers)
def test_add_dataset_headers_empty_list(self):
target = ["Error"]
self.result.add_dataset_headers([])
self.assertEqual(target, self.result.failed_dataset.headers)
def test_add_dataset_headers_None(self):
target = ["Error"]
self.result.add_dataset_headers(None)
self.assertEqual(target, self.result.failed_dataset.headers)
def test_result_append_failed_row_with_ValidationError(self):
target = [[1, "Some book", "['some error']"]]
self.result.append_failed_row(
self.dataset.dict[0], ValidationError("some error")
)
self.assertEqual(target, self.result.failed_dataset.dict)
def test_result_append_failed_row_with_wrapped_error(self):
target = [[1, "Some book", "['some error']"]]
row_result = RowResult()
error = Error(ValidationError("some error"))
row_result.errors = [error]
self.result.append_failed_row(self.dataset.dict[0], row_result.errors[0])
self.assertEqual(target, self.result.failed_dataset.dict)
def test_add_instance_info_null_instance(self):
row_result = RowResult()
row_result.add_instance_info(None)
self.assertEqual(None, row_result.object_id)
self.assertEqual(None, row_result.object_repr)
def test_add_instance_info_no_instance_pk(self):
row_result = RowResult()
row_result.add_instance_info(Book())
self.assertEqual(None, row_result.object_id)
self.assertEqual("", row_result.object_repr)
def test_add_instance_info(self):
row_result = RowResult()
row_result.add_instance_info(Book(pk=1, name="some book"))
self.assertEqual(1, row_result.object_id)
self.assertEqual("some book", row_result.object_repr)
@patch("import_export.results.logger")
def test_add_instance_info_instance_unserializable(self, mock_logger):
# issue 1763
class UnserializableBook:
# will raise TypeError
def __str__(self):
return None
row_result = RowResult()
row_result.add_instance_info(UnserializableBook())
mock_logger.debug.assert_called_with(
"call to force_str() on instance failed: "
"__str__ returned non-string (type NoneType)"
)
self.assertEqual(None, row_result.object_repr)
def test_is_new(self):
row_result = RowResult()
self.assertFalse(row_result.is_new())
row_result.import_type = RowResult.IMPORT_TYPE_NEW
self.assertTrue(row_result.is_new())
self.assertTrue(row_result.is_valid())
def test_is_update(self):
row_result = RowResult()
self.assertFalse(row_result.is_update())
row_result.import_type = RowResult.IMPORT_TYPE_UPDATE
self.assertTrue(row_result.is_update())
self.assertTrue(row_result.is_valid())
def test_is_skip(self):
row_result = RowResult()
self.assertFalse(row_result.is_skip())
row_result.import_type = RowResult.IMPORT_TYPE_SKIP
self.assertTrue(row_result.is_skip())
self.assertTrue(row_result.is_valid())
def test_is_delete(self):
row_result = RowResult()
self.assertFalse(row_result.is_delete())
row_result.import_type = RowResult.IMPORT_TYPE_DELETE
self.assertTrue(row_result.is_delete())
self.assertTrue(row_result.is_valid())
def test_is_error(self):
row_result = RowResult()
self.assertFalse(row_result.is_error())
row_result.import_type = RowResult.IMPORT_TYPE_ERROR
self.assertTrue(row_result.is_error())
self.assertFalse(row_result.is_valid())
def test_is_invalid(self):
row_result = RowResult()
self.assertFalse(row_result.is_invalid())
row_result.import_type = RowResult.IMPORT_TYPE_INVALID
self.assertTrue(row_result.is_invalid())
self.assertFalse(row_result.is_valid())
|
ResultTest
|
python
|
xlwings__xlwings
|
xlwings/pro/_xlcalamine.py
|
{
"start": 13642,
"end": 14273
}
|
class ____(base_classes.Name):
def __init__(self, parent, api):
self.parent = parent # only implemented for Book, not Sheet
self.api = api
@property
def name(self):
return self.api["name"]
@property
def refers_to(self):
sheet_name = self.parent.sheets(self.api["sheet_index"] + 1).name
sheet_name = f"'{sheet_name}'" if " " in sheet_name else sheet_name
return f"={sheet_name}!{self.api['address']}"
@property
def refers_to_range(self):
return self.parent.sheets(self.api["sheet_index"] + 1).range(
self.api["address"]
)
|
Name
|
python
|
joblib__joblib
|
joblib/test/test_hashing.py
|
{
"start": 1276,
"end": 1336
}
|
class ____(object):
def f(self, x):
return x
|
Klass
|
python
|
python-openxml__python-docx
|
tests/opc/unitdata/rels.py
|
{
"start": 614,
"end": 1654
}
|
class ____:
"""Builder class for test Relationships"""
partname_tmpls = {
RT.SLIDE_MASTER: "/ppt/slideMasters/slideMaster%d.xml",
RT.SLIDE: "/ppt/slides/slide%d.xml",
}
def __init__(self):
self.relationships = []
self.next_rel_num = 1
self.next_partnums = {}
def _next_partnum(self, reltype):
if reltype not in self.next_partnums:
self.next_partnums[reltype] = 1
partnum = self.next_partnums[reltype]
self.next_partnums[reltype] = partnum + 1
return partnum
@property
def next_rId(self):
rId = "rId%d" % self.next_rel_num
self.next_rel_num += 1
return rId
def _next_tuple_partname(self, reltype):
partname_tmpl = self.partname_tmpls[reltype]
partnum = self._next_partnum(reltype)
return partname_tmpl % partnum
def build(self):
rels = Relationships()
for rel in self.relationships:
rels.add_rel(rel)
return rels
|
RelationshipsBuilder
|
python
|
getsentry__sentry
|
tests/sentry/snuba/test_models.py
|
{
"start": 987,
"end": 5430
}
|
class ____(TestCase):
def setUp(self) -> None:
self.snuba_query = create_snuba_query(
SnubaQuery.Type.ERROR,
Dataset.Events,
"release:123",
"count()",
timedelta(minutes=10),
timedelta(minutes=1),
None,
[SnubaQueryEventType.EventType.DEFAULT, SnubaQueryEventType.EventType.ERROR],
)
self.subscription = create_snuba_subscription(
self.project,
"test_data_source_handler",
self.snuba_query,
)
self.data_source = self.create_data_source(
type="snuba_query_subscription",
source_id=str(self.subscription.id),
)
def test_bulk_get_query_object(self) -> None:
result = QuerySubscriptionDataSourceHandler.bulk_get_query_object([self.data_source])
assert result[self.data_source.id] == self.subscription
def test_bulk_get_query_object__incorrect_data_source(self) -> None:
self.ds_with_invalid_subscription_id = self.create_data_source(
type="snuba_query_subscription",
source_id="not_int",
)
with mock.patch("sentry.snuba.models.logger.exception") as mock_logger:
data_sources = [self.data_source, self.ds_with_invalid_subscription_id]
result = QuerySubscriptionDataSourceHandler.bulk_get_query_object(data_sources)
assert result[self.data_source.id] == self.subscription
mock_logger.assert_called_once_with(
"Invalid DataSource.source_id fetching subscriptions",
extra={
"id": self.ds_with_invalid_subscription_id.id,
"source_id": self.ds_with_invalid_subscription_id.source_id,
},
)
def test_get_instance_limit(self) -> None:
with self.settings(MAX_QUERY_SUBSCRIPTIONS_PER_ORG=42):
assert QuerySubscriptionDataSourceHandler.get_instance_limit(self.organization) == 42
def test_get_instance_limit_with_override(self) -> None:
with self.settings(MAX_QUERY_SUBSCRIPTIONS_PER_ORG=42):
with self.options(
{
"metric_alerts.extended_max_subscriptions_orgs": [self.organization.id],
"metric_alerts.extended_max_subscriptions": 100,
}
):
assert (
QuerySubscriptionDataSourceHandler.get_instance_limit(self.organization) == 100
)
def test_get_current_instance_count(self) -> None:
new_org = self.create_organization()
new_project = self.create_project(organization=new_org)
new_project2 = self.create_project(organization=new_org)
# Create some subscriptions in different states
QuerySubscription.objects.create(
project=new_project,
type="active_sub",
snuba_query=self.snuba_query,
status=QuerySubscription.Status.ACTIVE.value,
)
QuerySubscription.objects.create(
project=new_project2,
type="creating_sub",
snuba_query=self.snuba_query,
status=QuerySubscription.Status.CREATING.value,
)
QuerySubscription.objects.create(
project=new_project,
type="updating_sub",
snuba_query=self.snuba_query,
status=QuerySubscription.Status.UPDATING.value,
)
QuerySubscription.objects.create(
project=new_project2,
type="disabled_sub",
snuba_query=self.snuba_query,
status=QuerySubscription.Status.DISABLED.value,
)
# Should count active, creating, and updating subscriptions
assert QuerySubscriptionDataSourceHandler.get_current_instance_count(new_org) == 3
# Create a subscription for a different org
other_org = self.create_organization()
other_project = self.create_project(organization=other_org)
QuerySubscription.objects.create(
project=other_project,
type="other_org_sub",
snuba_query=self.snuba_query,
status=QuerySubscription.Status.ACTIVE.value,
)
# Count should still be 3 as it only counts for the given org
assert QuerySubscriptionDataSourceHandler.get_current_instance_count(new_org) == 3
|
QuerySubscriptionDataSourceHandlerTest
|
python
|
django__django
|
tests/delete/models.py
|
{
"start": 6863,
"end": 7090
}
|
class ____(models.Model):
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
generic_delete_top = GenericForeignKey("content_type", "object_id")
|
GenericB1
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/engine/input_layer.py
|
{
"start": 1643,
"end": 16072
}
|
class ____(base_layer.Layer):
"""Layer to be used as an entry point into a Network (a graph of layers).
It can either wrap an existing tensor (pass an `input_tensor` argument)
or create a placeholder tensor (pass arguments `input_shape`, and
optionally, `dtype`).
It is generally recommended to use the functional layer API via `Input`,
(which creates an `InputLayer`) without directly using `InputLayer`.
When using InputLayer with Keras Sequential model, it can be skipped by
moving the input_shape parameter to the first layer after the InputLayer.
This class can create placeholders for tf.Tensors, tf.SparseTensors, and
tf.RaggedTensors by choosing 'sparse=True' or 'ragged=True'. Note that
'sparse' and 'ragged' can't be configured to True at the same time.
Usage:
```python
# With explicit InputLayer.
model = tf.keras.Sequential([
tf.keras.layers.InputLayer(input_shape=(4,)),
tf.keras.layers.Dense(8)])
model.compile(tf.optimizers.RMSprop(0.001), loss='mse')
model.fit(np.zeros((10, 4)),
np.ones((10, 8)))
# Without InputLayer and let the first layer to have the input_shape.
# Keras will add a input for the model behind the scene.
model = tf.keras.Sequential([
tf.keras.layers.Dense(8, input_shape=(4,))])
model.compile(tf.optimizers.RMSprop(0.001), loss='mse')
model.fit(np.zeros((10, 4)),
np.ones((10, 8)))
```
Args:
input_shape: Shape tuple (not including the batch axis), or `TensorShape`
instance (not including the batch axis).
batch_size: Optional input batch size (integer or None).
dtype: Optional datatype of the input. When not provided, the Keras
default float type will be used.
input_tensor: Optional tensor to use as layer input. If set, the layer
will use the `tf.TypeSpec` of this tensor rather
than creating a new placeholder tensor.
sparse: Boolean, whether the placeholder created is meant to be sparse.
Defaults to False.
ragged: Boolean, whether the placeholder created is meant to be ragged.
In this case, values of 'None' in the 'shape' argument represent
ragged dimensions. For more information about RaggedTensors, see
[this guide](https://www.tensorflow.org/guide/ragged_tensors).
Defaults to False.
type_spec: A `tf.TypeSpec` object to create Input from. This `tf.TypeSpec`
represents the entire batch. When provided, all other args except
name must be None.
name: Optional name of the layer (string).
"""
def __init__(self,
input_shape=None,
batch_size=None,
dtype=None,
input_tensor=None,
sparse=None,
name=None,
ragged=None,
type_spec=None,
**kwargs):
self._init_input_shape = input_shape
self._init_batch_size = batch_size
self._init_dtype = dtype
self._init_sparse = sparse
self._init_ragged = ragged
self._init_type_spec = type_spec
strategy = distribute_lib.get_strategy()
if strategy and batch_size is not None and \
distributed_training_utils.global_batch_size_supported(strategy):
if batch_size % strategy.num_replicas_in_sync != 0:
raise ValueError('The `batch_size` argument ({}) must be divisible by '
'the number of replicas ({})'.format(
batch_size, strategy.num_replicas_in_sync))
batch_size = batch_size // strategy.num_replicas_in_sync
if 'batch_input_shape' in kwargs:
batch_input_shape = kwargs.pop('batch_input_shape')
if input_shape and batch_input_shape:
raise ValueError('Only provide the input_shape OR '
'batch_input_shape argument to '
'InputLayer, not both at the same time.')
# Set the input shape and batch size from the batch_input_shape.
# Note that batch_input_shape can be None (unknown rank) or [] (scalar),
# in which case the batch size must be None.
if batch_input_shape:
batch_size = batch_input_shape[0]
input_shape = batch_input_shape[1:]
if kwargs:
raise ValueError('Unrecognized keyword arguments:', kwargs.keys())
if sparse and ragged:
raise ValueError(
'Cannot set both sparse and ragged to True in a Keras input.')
if not name:
prefix = 'input'
name = prefix + '_' + str(backend.get_uid(prefix))
if not dtype:
if input_tensor is None:
dtype = backend.floatx()
else:
dtype = backend.dtype(input_tensor)
elif input_tensor is not None and input_tensor.dtype != dtype:
raise ValueError('`input_tensor.dtype` differs from `dtype`: %s vs. %s' %
(input_tensor.dtype, dtype))
super(InputLayer, self).__init__(dtype=dtype, name=name)
self.built = True
self.sparse = True if sparse else False
self.ragged = True if ragged else False
self.batch_size = batch_size
self.supports_masking = True
if isinstance(input_shape, tensor_shape.TensorShape):
input_shape = tuple(input_shape.as_list())
elif isinstance(input_shape, int):
input_shape = (input_shape,)
if type_spec is not None:
args_that_must_be_none = [
('(input_)shape', self._init_input_shape),
('batch_size', self._init_batch_size),
('dtype', self._init_dtype),
('input_tensor', input_tensor),
('sparse', self._init_sparse),
('ragged', self._init_ragged),
]
for arg_name, arg in args_that_must_be_none:
_assert_other_arg_none(arg_name, arg)
if not ops.executing_eagerly_outside_functions():
raise ValueError('Creating Keras inputs from a type_spec is only '
'supported when eager execution is enabled.')
input_tensor = keras_tensor.keras_tensor_from_type_spec(type_spec)
if isinstance(input_tensor, keras_tensor.SparseKerasTensor):
self.sparse = True
if isinstance(input_tensor, keras_tensor.RaggedKerasTensor):
self.ragged = True
self.is_placeholder = True
try:
self._batch_input_shape = tuple(input_tensor.shape.as_list())
except ValueError:
# If the shape cannot be represented as a tuple (e.g. unknown rank)
self._batch_input_shape = None
elif input_tensor is None:
if input_shape is not None:
batch_input_shape = (batch_size,) + tuple(input_shape)
else:
batch_input_shape = None
graph = backend.get_graph()
with graph.as_default():
input_tensor = backend.placeholder(
shape=batch_input_shape,
dtype=dtype,
name=self.name,
sparse=sparse,
ragged=ragged)
self.is_placeholder = True
self._batch_input_shape = batch_input_shape
else:
if ops.executing_eagerly_outside_functions():
if not isinstance(input_tensor, keras_tensor.KerasTensor):
input_tensor = keras_tensor.keras_tensor_from_tensor(input_tensor)
else:
if not tf_utils.is_symbolic_tensor(input_tensor):
raise ValueError('You should not pass an EagerTensor to `Input`. '
'For example, instead of creating an '
'InputLayer, you should instantiate your model and '
'directly call it on your input.')
self.is_placeholder = False
try:
self._batch_input_shape = tuple(input_tensor.shape.as_list())
except ValueError:
# If the shape cannot be represented as a tuple (e.g. unknown rank)
self._batch_input_shape = None
# Create an input node.
input_tensor._keras_mask = None
node_module.Node(layer=self, outputs=input_tensor)
# Store type spec
if isinstance(input_tensor, keras_tensor.KerasTensor) or (
tf_utils.is_extension_type(input_tensor)):
self._type_spec = input_tensor._type_spec # pylint: disable=protected-access
else:
self._type_spec = tensor_spec.TensorSpec(
shape=input_tensor.shape, dtype=input_tensor.dtype, name=self.name)
def get_config(self):
if self._init_type_spec is not None:
config = {
'name': self.name,
'type_spec': self._init_type_spec
}
else:
config = {
'batch_input_shape': self._batch_input_shape,
'dtype': self.dtype,
'sparse': self.sparse,
'ragged': self.ragged,
'name': self.name,
}
return config
@property
def _trackable_saved_model_saver(self):
return layer_serialization.InputLayerSavedModelSaver(self)
def Input( # pylint: disable=invalid-name
shape=None,
batch_size=None,
name=None,
dtype=None,
sparse=None,
tensor=None,
ragged=None,
type_spec=None,
**kwargs):
"""`Input()` is used to instantiate a Keras tensor.
A Keras tensor is a symbolic tensor-like object,
which we augment with certain attributes that allow us to build a Keras model
just by knowing the inputs and outputs of the model.
For instance, if `a`, `b` and `c` are Keras tensors,
it becomes possible to do:
`model = Model(input=[a, b], output=c)`
Args:
shape: A shape tuple (integers), not including the batch size.
For instance, `shape=(32,)` indicates that the expected input
will be batches of 32-dimensional vectors. Elements of this tuple
can be None; 'None' elements represent dimensions where the shape is
not known.
batch_size: optional static batch size (integer).
name: An optional name string for the layer.
Should be unique in a model (do not reuse the same name twice).
It will be autogenerated if it isn't provided.
dtype: The data type expected by the input, as a string
(`float32`, `float64`, `int32`...)
sparse: A boolean specifying whether the placeholder to be created is
sparse. Only one of 'ragged' and 'sparse' can be True. Note that,
if `sparse` is False, sparse tensors can still be passed into the
input - they will be densified with a default value of 0.
tensor: Optional existing tensor to wrap into the `Input` layer.
If set, the layer will use the `tf.TypeSpec` of this tensor rather
than creating a new placeholder tensor.
ragged: A boolean specifying whether the placeholder to be created is
ragged. Only one of 'ragged' and 'sparse' can be True. In this case,
values of 'None' in the 'shape' argument represent ragged dimensions.
For more information about RaggedTensors, see
[this guide](https://www.tensorflow.org/guide/ragged_tensors).
type_spec: A `tf.TypeSpec` object to create the input placeholder from.
When provided, all other args except name must be None.
**kwargs: deprecated arguments support. Supports `batch_shape` and
`batch_input_shape`.
Returns:
A `tensor`.
Example:
```python
# this is a logistic regression in Keras
x = Input(shape=(32,))
y = Dense(16, activation='softmax')(x)
model = Model(x, y)
```
Note that even if eager execution is enabled,
`Input` produces a symbolic tensor-like object (i.e. a placeholder).
This symbolic tensor-like object can be used with lower-level
TensorFlow ops that take tensors as inputs, as such:
```python
x = Input(shape=(32,))
y = tf.square(x) # This op will be treated like a layer
model = Model(x, y)
```
(This behavior does not work for higher-order TensorFlow APIs such as
control flow and being directly watched by a `tf.GradientTape`).
However, the resulting model will not track any variables that were
used as inputs to TensorFlow ops. All variable usages must happen within
Keras layers to make sure they will be tracked by the model's weights.
The Keras Input can also create a placeholder from an arbitrary `tf.TypeSpec`,
e.g:
```python
x = Input(type_spec=tf.RaggedTensorSpec(shape=[None, None],
dtype=tf.float32, ragged_rank=1))
y = x.values
model = Model(x, y)
```
When passing an arbitrary `tf.TypeSpec`, it must represent the signature of an
entire batch instead of just one example.
Raises:
ValueError: If both `sparse` and `ragged` are provided.
ValueError: If both `shape` and (`batch_input_shape` or `batch_shape`) are
provided.
ValueError: If `shape`, `tensor` and `type_spec` are None.
ValueError: If arguments besides `type_spec` are non-None while `type_spec`
is passed.
ValueError: if any unrecognized parameters are provided.
"""
if sparse and ragged:
raise ValueError(
'Cannot set both sparse and ragged to True in a Keras input.')
input_layer_config = {'name': name, 'dtype': dtype, 'sparse': sparse,
'ragged': ragged, 'input_tensor': tensor,
'type_spec': type_spec}
batch_input_shape = kwargs.pop('batch_input_shape',
kwargs.pop('batch_shape', None))
if shape is not None and batch_input_shape is not None:
raise ValueError('Only provide the `shape` OR `batch_input_shape` argument '
'to Input, not both at the same time.')
if (batch_input_shape is None and shape is None and tensor is None
and type_spec is None):
raise ValueError('Please provide to Input a `shape`'
' or a `tensor` or a `type_spec` argument. Note that '
'`shape` does not include the batch '
'dimension.')
if kwargs:
raise ValueError('Unrecognized keyword arguments:', kwargs.keys())
if batch_input_shape:
shape = batch_input_shape[1:]
input_layer_config.update({'batch_input_shape': batch_input_shape})
else:
input_layer_config.update(
{'batch_size': batch_size, 'input_shape': shape})
input_layer = InputLayer(**input_layer_config)
# Return tensor including `_keras_history`.
# Note that in this case train_output and test_output are the same pointer.
outputs = input_layer._inbound_nodes[0].outputs
if isinstance(outputs, list) and len(outputs) == 1:
return outputs[0]
else:
return outputs
|
InputLayer
|
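The `Input()` docstring in the row above describes how `shape`, `batch_size`, and the deprecated `batch_shape`/`batch_input_shape` keywords interact. A minimal, framework-free sketch of that argument resolution (hypothetical helper name, no TensorFlow involved):

```python
# Hypothetical, framework-free sketch of the shape resolution performed by
# Input() in the row above; it mirrors the kwargs handling but never touches TF.
def resolve_batch_shape(shape=None, batch_size=None, **kwargs):
    batch_input_shape = kwargs.pop('batch_input_shape',
                                   kwargs.pop('batch_shape', None))
    if shape is not None and batch_input_shape is not None:
        raise ValueError('Only provide `shape` OR `batch_input_shape`, not both.')
    if kwargs:
        raise ValueError('Unrecognized keyword arguments: %s' % list(kwargs))
    if batch_input_shape:
        return tuple(batch_input_shape)          # leading entry is the batch dim
    if shape is not None:
        return (batch_size,) + tuple(shape)      # batch dim may be None
    return None

assert resolve_batch_shape(shape=(32,)) == (None, 32)
assert resolve_batch_shape(batch_shape=(8, 32)) == (8, 32)
```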
python
|
apache__airflow
|
task-sdk/src/airflow/sdk/definitions/_internal/contextmanager.py
|
{
"start": 1834,
"end": 2506
}
|
class ____(type):
_context: deque
# TODO: Task-SDK:
# share_parent_context can go away once the Dag and TaskContext manager in airflow.models are removed and
# everything uses sdk fully for definition/parsing
def __new__(cls, name, bases, namespace, share_parent_context: bool = False, **kwargs: Any):
if not share_parent_context:
namespace["_context"] = deque()
new_cls = super().__new__(cls, name, bases, namespace, **kwargs)
return new_cls
@property
def active(self) -> bool:
"""The active property says if any object is currently in scope."""
return bool(self._context)
|
ContextStackMeta
|
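A self-contained demo of the behaviour the metaclass above implements: each class gets its own `_context` deque unless it opts into sharing the parent's. The class names below are made up for illustration only.

```python
from collections import deque
from typing import Any

class ContextStackMeta(type):
    _context: deque

    def __new__(cls, name, bases, namespace, share_parent_context: bool = False, **kwargs: Any):
        if not share_parent_context:
            namespace["_context"] = deque()      # fresh, per-class stack
        return super().__new__(cls, name, bases, namespace, **kwargs)

    @property
    def active(self) -> bool:
        return bool(self._context)

class DagContext(metaclass=ContextStackMeta):
    pass

class SharedContext(DagContext, share_parent_context=True):
    pass

DagContext._context.append(object())
assert SharedContext._context is DagContext._context  # deque inherited, not re-created
assert DagContext.active and SharedContext.active
```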
python
|
getsentry__sentry
|
src/sentry/testutils/cases.py
|
{
"start": 94325,
"end": 94489
}
|
class ____(TypedDict):
name: str
fields: list[str]
aggregates: list[str]
columns: list[str]
fieldAliases: list[str]
conditions: str
|
_QueryDict
|
python
|
mlflow__mlflow
|
mlflow/genai/judges/optimizers/simba.py
|
{
"start": 1330,
"end": 5281
}
|
class ____(DSPyAlignmentOptimizer):
"""
SIMBA (Simplified Multi-Bootstrap Aggregation) alignment optimizer.
Uses DSPy's SIMBA algorithm to optimize judge prompts through
bootstrap aggregation with simplified parametrization.
Note on Logging:
By default, SIMBA optimization suppresses DSPy's verbose output.
To see detailed optimization progress from DSPy, set the MLflow logger to DEBUG::
import logging
logging.getLogger("mlflow.genai.judges.optimizers.simba").setLevel(logging.DEBUG)
"""
# Class constants for default SIMBA parameters
DEFAULT_SEED: ClassVar[int] = 42
def __init__(
self,
model: str | None = None,
batch_size: int | None = None,
seed: int | None = None,
simba_kwargs: dict[str, Any] | None = None,
**kwargs,
):
"""
Initialize SIMBA optimizer with customizable parameters.
Args:
model: Model to use for DSPy optimization. If None, uses get_default_model().
batch_size: Batch size for SIMBA evaluation. If None, uses get_min_traces_required().
seed: Random seed for reproducibility. If None, uses DEFAULT_SEED (42).
simba_kwargs: Additional keyword arguments to pass directly to dspy.SIMBA().
Supported parameters include:
- metric: Custom metric function (overrides default agreement_metric)
- max_demos: Maximum number of demonstrations to use
- num_threads: Number of threads for parallel optimization
- max_steps: Maximum number of optimization steps
See https://dspy.ai/api/optimizers/SIMBA/ for full list.
**kwargs: Additional keyword arguments passed to parent class
"""
super().__init__(model=model, **kwargs)
self._batch_size = batch_size
self._seed = seed or self.DEFAULT_SEED
self._simba_kwargs = simba_kwargs or {}
def _get_batch_size(self) -> int:
"""
Get the batch size for SIMBA optimization.
Returns:
The batch size to use for SIMBA optimization.
"""
return self._batch_size if self._batch_size is not None else self.get_min_traces_required()
def _dspy_optimize(
self,
program: "dspy.Module",
examples: Collection["dspy.Example"],
metric_fn: Callable[["dspy.Example", Any, Any | None], bool],
) -> "dspy.Module":
"""
Perform SIMBA optimization with algorithm-specific parameters.
SIMBA uses all examples as training data (no separate validation set).
Args:
program: The DSPy program to optimize
examples: Examples for optimization
metric_fn: Default metric function for optimization
Returns:
Optimized DSPy program
"""
with _suppress_verbose_logging("dspy.teleprompt.simba"):
# Build SIMBA optimizer kwargs starting with required parameters
# If metric is in simba_kwargs, it will override the default metric_fn
optimizer_kwargs = {
"metric": metric_fn,
"bsize": self._get_batch_size(),
**self._simba_kwargs, # Pass through any additional SIMBA parameters
}
optimizer = dspy.SIMBA(**optimizer_kwargs)
_logger.info(
f"Starting SIMBA optimization with {len(examples)} examples "
f"(set logging to DEBUG for detailed output)"
)
# Compile with SIMBA-specific parameters
result = optimizer.compile(
student=program,
trainset=examples,
seed=self._seed,
)
_logger.info("SIMBA optimization completed")
return result
|
SIMBAAlignmentOptimizer
|
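The optimizer above wraps compilation in a `_suppress_verbose_logging` helper that is not part of this snippet; a hypothetical, standard-library-only stand-in could look like the following (the real MLflow helper may differ):

```python
import contextlib
import logging

@contextlib.contextmanager
def _suppress_verbose_logging(logger_name: str, level: int = logging.WARNING):
    logger = logging.getLogger(logger_name)
    previous_level = logger.level
    logger.setLevel(level)               # hide INFO/DEBUG chatter while optimizing
    try:
        yield
    finally:
        logger.setLevel(previous_level)  # always restore the caller's level

with _suppress_verbose_logging("dspy.teleprompt.simba"):
    logging.getLogger("dspy.teleprompt.simba").info("hidden during optimization")
```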
python
|
getsentry__sentry
|
src/sentry/users/api/serializers/useremail.py
|
{
"start": 324,
"end": 449
}
|
class ____(TypedDict):
email: str
isPrimary: bool
isVerified: bool
@register(UserEmail)
|
UserEmailSerializerResponse
|
python
|
encode__django-rest-framework
|
tests/schemas/views.py
|
{
"start": 6381,
"end": 6763
}
|
class ____(generics.GenericAPIView):
serializer_class = ExampleValidatedSerializer
schema = AutoSchema(component_name="Duplicate")
def get(self, *args, **kwargs):
from datetime import datetime
now = datetime.now()
serializer = self.get_serializer(data=now.date(), datetime=now)
return Response(serializer.data)
|
ExampleAutoSchemaDuplicate1
|
python
|
google__jax
|
jax/_src/interpreters/partial_eval.py
|
{
"start": 54119,
"end": 73533
}
|
class ____(NamedTuple):
src: MemoryKind
dst: MemoryKind
RematCases = Union[RecomputeType, SaveableType, Offloadable]
RematCases_ = Union[RematCases, bool]
def ensure_enum(case: bool | RematCases) -> RematCases:
if isinstance(case, bool):
return Saveable if case else Recompute
if not isinstance(case, (RecomputeType, SaveableType, Offloadable)):
msg = ("Value returned by a remat policy should be a bool or"
" `ad_checkpoint.Recompute`, `ad_checkpoint.Saveable` or"
" `ad_checkpoint.Offloadable(...)`."
f" Got {case} of type {type(case)}.")
if isinstance(case, Offloadable):
msg += ("Did you return `Offloadable` instead of an instantiated"
" `Offloadable(...)`?")
raise TypeError(msg)
return case
# A primitive rule for policy-driven partial evaluation returns a 5-tuple
# with the components representing, respectively:
# * the JaxprEqn for the 'known' side (or None if there is no known component),
# * the JaxprEqn for the 'unknown' side (or None),
# * a list of booleans indicating which of the original outputs are unknown,
# * a list of booleans indicating which of the original outputs are
# instantiated (i.e. available) in the 'unknown' side,
# * a list of Var instances representing residuals to be added (i.e. to be
# plumbed as outputs of the 'known' side jaxpr and added as input binders to
# the 'unknown' jaxpr).
PartialEvalCustomResult = tuple[Union[JaxprEqn, None], Union[JaxprEqn, None],
Sequence[bool], Sequence[bool], list[Var]]
PartialEvalCustomRule = Callable[
[Callable[..., RematCases_], Sequence[bool], Sequence[bool], JaxprEqn],
PartialEvalCustomResult]
partial_eval_jaxpr_custom_rules: dict[Primitive, PartialEvalCustomRule] = {}
def partial_eval_jaxpr_custom_rule_not_implemented(
name: str, saveable: Callable[..., RematCases_], unks_in: Sequence[bool],
inst_in: Sequence[bool], eqn: JaxprEqn) -> PartialEvalCustomResult:
msg = (f'custom-policy remat rule not implemented for {name}, '
'open a feature request at https://github.com/jax-ml/jax/issues!')
raise NotImplementedError(msg)
ParamsUpdater = Callable[[Sequence[bool], Sequence[bool], Sequence[bool],
Sequence[bool], int, dict, dict],
tuple[dict, dict]]
ResAvalUpdater = Callable[[dict[str, Any], AbstractValue], AbstractValue]
def _default_res_aval_updater(
params: dict[str, Any], aval: AbstractValue) -> AbstractValue:
return aval
def call_partial_eval_custom_rule(
jaxpr_param_name: str, params_updater: ParamsUpdater,
saveable: Callable[..., RematCases_], unks_in: list[bool], inst_in: list[bool],
eqn: JaxprEqn, *, res_aval: ResAvalUpdater = _default_res_aval_updater,
ctx = contextlib.nullcontext,
) -> tuple[JaxprEqn, JaxprEqn, Sequence[bool], Sequence[bool], list[Var]]:
jaxpr = eqn.params[jaxpr_param_name]
with ctx(eqn.params):
jaxpr_known, jaxpr_staged, unks_out, inst_out, num_res = \
partial_eval_jaxpr_custom(jaxpr, unks_in, inst_in, False, False, saveable)
ins_known, _ = partition_list(unks_in, eqn.invars)
out_binders_known, _ = partition_list(unks_out, eqn.outvars)
_, ins_staged = partition_list(inst_in, eqn.invars)
_, out_binders_staged = partition_list(inst_out, eqn.outvars)
params_known = {**eqn.params, jaxpr_param_name: jaxpr_known}
params_staged = {**eqn.params, jaxpr_param_name: jaxpr_staged}
params_known, params_staged = params_updater(
unks_in, inst_in, map(op.not_, unks_out), inst_out, num_res, params_known,
params_staged)
residuals = [Var(res_aval(params_known, var.aval))
for var in jaxpr_staged.invars[:num_res]]
eqn_known = new_jaxpr_eqn(
ins_known, [*out_binders_known, *residuals], eqn.primitive, params_known,
core.eqn_effects(jaxpr_known), eqn.source_info, eqn.ctx)
eqn_staged = new_jaxpr_eqn(
[*residuals, *ins_staged], out_binders_staged, eqn.primitive,
params_staged, core.eqn_effects(jaxpr_staged), eqn.source_info,
eqn.ctx)
assert len(eqn_staged.invars) == len(jaxpr_staged.invars)
new_inst = [x for x, inst in zip(eqn.invars, inst_in)
if type(x) is Var and not inst]
return eqn_known, eqn_staged, unks_out, inst_out, new_inst + residuals
# TODO(mattjj): unify with ParamsUpdater (this one takes an extra int)
ParamsUpdater2 = Callable[[Sequence[bool], Sequence[bool], Sequence[bool],
Sequence[bool], int, int, dict, dict],
tuple[dict, dict]]
def closed_call_partial_eval_custom_rule(
jaxpr_param_name: str, params_updater: ParamsUpdater2,
saveable: Callable[..., RematCases_], unks_in: list[bool], inst_in: list[bool],
eqn: JaxprEqn, *, res_aval: ResAvalUpdater = _default_res_aval_updater,
) -> tuple[JaxprEqn, JaxprEqn, Sequence[bool], Sequence[bool], list[Var]]:
# TODO(sharadmv,mattjj): dedup this rule with call_partial_eval_custom_rule.
dropvars = tuple(isinstance(v, DropVar) for v in eqn.outvars)
jaxpr_known, jaxpr_staged, unks_out, inst_out, num_res_ref, num_res_val, out_fwd = \
_closed_jaxpr_partial_eval_custom_cached(
eqn.params[jaxpr_param_name], (*unks_in,), (*inst_in,), dropvars, saveable)
num_res = num_res_ref + num_res_val
out_binders_known, _ = partition_list(unks_out, eqn.outvars)
ins_known, _ = partition_list(unks_in, eqn.invars)
_, ins_staged = partition_list(inst_in, eqn.invars)
_, out_binders_staged = partition_list(inst_out, eqn.outvars)
params_known = {**eqn.params, jaxpr_param_name: jaxpr_known}
params_staged = {**eqn.params, jaxpr_param_name: jaxpr_staged}
params_known, params_staged = params_updater(
unks_in, inst_in, map(op.not_, unks_out), inst_out,
sum(f is None for f in out_fwd), num_res, params_known, params_staged)
res_val_binders, res_ref_binders = split_list(
[Var(res_aval(params_known, v))
for v in jaxpr_staged.in_avals[:num_res]], [num_res_val])
res_val_binders = [v for v, f in zip(res_val_binders, out_fwd) if f is None]
res_val_vars = subs_list(out_fwd, out_binders_known, res_val_binders)
eqn_known = new_jaxpr_eqn(
[*ins_known, *res_ref_binders], [*out_binders_known, *res_val_binders],
eqn.primitive, params_known, core.eqn_effects(jaxpr_known),
eqn.source_info, eqn.ctx)
eqn_staged = new_jaxpr_eqn(
[*res_val_vars, *res_ref_binders, *ins_staged], out_binders_staged,
eqn.primitive, params_staged, core.eqn_effects(jaxpr_staged),
eqn.source_info, eqn.ctx)
assert len(eqn_staged.invars) == len(jaxpr_staged.in_avals)
assert len(ins_known) + len(res_ref_binders) == len(jaxpr_known.jaxpr.invars)
assert len(ins_staged) + len(res_ref_binders) + len(res_val_vars) == len(jaxpr_staged.jaxpr.invars)
assert len(out_binders_known) + len(res_val_binders) == len(jaxpr_known.jaxpr.outvars)
new_inst = [x for x, inst in zip(eqn.invars, inst_in)
if type(x) is Var and not inst]
new_vars = [*new_inst, *res_val_vars, *res_ref_binders]
return eqn_known, eqn_staged, unks_out, inst_out, new_vars
@weakref_lru_cache
def _closed_jaxpr_partial_eval_custom_cached(
jaxpr: ClosedJaxpr, unks_in: tuple[bool, ...], inst_in: tuple[bool, ...],
dropvars: tuple[bool, ...], saveable: Callable
) -> tuple[ClosedJaxpr, ClosedJaxpr, Sequence[bool], Sequence[bool],
int, int, Sequence[int | None]]:
jaxpr_known_, jaxpr_staged_, unks_out, inst_out, num_res_val, num_res_ref = \
partial_eval_jaxpr_stateful(jaxpr.jaxpr, unks_in, inst_in,
False, False, saveable)
# Compute which residual value outputs are also *undropped* primal outputs.
num_out_primals = len(jaxpr_known_.outvars) - num_res_val
out_vars, res_vars = split_list(jaxpr_known_.outvars, [num_out_primals])
out_dropvars_known, _ = partition_list(unks_out, dropvars)
idx_map = {id(v): i for i, (v, b) in enumerate(zip(out_vars, out_dropvars_known))
if not b}
out_fwd = [idx_map.get(id(v)) for v in res_vars]
# Prune jaxpr_known_ outputs by removing forwards.
jaxpr_known_ = prune_jaxpr_outputs(
jaxpr_known_, [True] * num_out_primals + [f is None for f in out_fwd])
jaxpr_known = core.ClosedJaxpr(jaxpr_known_, jaxpr.consts)
jaxpr_staged = core.ClosedJaxpr(jaxpr_staged_, jaxpr.consts)
return jaxpr_known, jaxpr_staged, unks_out, inst_out, num_res_ref, num_res_val, out_fwd
partial_eval_jaxpr_custom_rules[core.call_p] = \
partial(call_partial_eval_custom_rule, 'call_jaxpr',
lambda _, __, ___, ____, _____, x, y: (x, y))
partial_eval_jaxpr_custom_rules[core.closed_call_p] = \
partial(closed_call_partial_eval_custom_rule, 'call_jaxpr',
lambda _, __, ___, ____, _____, ______, x, y: (x, y))
def _jaxpr_forwarding(jaxpr: Jaxpr) -> list[int | None]:
# Compute which inputs are just forwarded to outputs.
fwds: dict[Var, Atom] = dict(zip(jaxpr.invars, jaxpr.invars))
for eqn in jaxpr.eqns:
if eqn.primitive in forwarding_rules:
eqn = eqn.replace(invars=[a if type(a) is Literal else fwds.get(a, a) # type: ignore
for a in eqn.invars])
fwd_idx, _ = forwarding_rules[eqn.primitive](eqn)
for v_orig, idx in zip(eqn.outvars, fwd_idx):
if idx is not None:
fwds[v_orig] = eqn.invars[idx]
idxs: dict[Var, int] = {v: i for i, v in enumerate(jaxpr.invars)}
return [None if type(v) is Literal else idxs.get(fwds.get(v)) # type: ignore
for v in jaxpr.outvars]
def prune_jaxpr_outputs(jaxpr: Jaxpr, used_outputs: Sequence[bool]) -> Jaxpr:
return _prune_jaxpr_outputs_cached(jaxpr, tuple(used_outputs))
def _prune_jaxpr_outputs(jaxpr: Jaxpr, used_outputs: tuple[bool, ...]) -> Jaxpr:
outvars = [v for v, b in zip(jaxpr.outvars, used_outputs) if b]
dbg = core.DebugInfo(
jaxpr.debug_info.traced_for, jaxpr.debug_info.func_src_info,
jaxpr.debug_info.arg_names,
jaxpr.debug_info.filter_result_paths(used_outputs))
new_jaxpr = jaxpr.replace(outvars=outvars, debug_info=dbg)
config.enable_checks.value and core.check_jaxpr(new_jaxpr)
return new_jaxpr
_prune_jaxpr_outputs_cached = weakref_lru_cache(_prune_jaxpr_outputs)
def prune_closed_jaxpr_outputs(
jaxpr: ClosedJaxpr, used_outputs: Sequence[bool]
) -> ClosedJaxpr:
return _prune_closed_jaxpr_outputs(jaxpr, tuple(used_outputs))
@partial(weakref_lru_cache, trace_context_in_key=False)
def _prune_closed_jaxpr_outputs(
jaxpr: ClosedJaxpr, used_outputs: tuple[bool, ...]
) -> ClosedJaxpr:
return ClosedJaxpr(_prune_jaxpr_outputs(jaxpr.jaxpr, used_outputs),
jaxpr.consts)
def dce_jaxpr(jaxpr: Jaxpr, used_outputs: Sequence[bool],
instantiate: bool | Sequence[bool] = False,
) -> tuple[Jaxpr, list[bool]]:
"""Runs dead-code elementation on a given jaxpr.
Args:
jaxpr: The jaxpr to DCE.
used_outputs: A list of bools indicating which outputs are used.
instantiate: A bool or a list of bools indicating which inputs should be
considered used, regardless of whether they are actually used in a jaxpr.
If a bool, the same value is used for all inputs.
Returns:
A tuple of ``(new_jaxpr, used_inputs)``.
"""
if type(instantiate) is bool:
instantiate = (instantiate,) * len(jaxpr.invars)
return _dce_jaxpr(jaxpr, tuple(used_outputs), tuple(instantiate))
def dce_jaxpr_consts(jaxpr: Jaxpr, used_outputs: Sequence[bool],
instantiate: bool | Sequence[bool] = False,
) -> tuple[Jaxpr, list[bool], list[bool]]:
jaxpr_ = convert_constvars_jaxpr(jaxpr)
new_jaxpr, used_inputs_ = dce_jaxpr(jaxpr_, used_outputs, instantiate)
used_consts, used_inputs = split_list(used_inputs_, [len(jaxpr.constvars)])
if sum(used_consts):
new_jaxpr = convert_invars_to_constvars(new_jaxpr, sum(used_consts))
return new_jaxpr, used_consts, used_inputs
def has_effects(eqn: JaxprEqn) -> bool:
effs = {e for e in eqn.effects if not isinstance(e, core.NamedAxisEffect)
and not isinstance(e, ReadEffect)}
return bool(effs)
@weakref_lru_cache
def _dce_jaxpr(jaxpr: Jaxpr, used_outputs: tuple[bool, ...],
instantiate: tuple[bool, ...]
) -> tuple[Jaxpr, list[bool]]:
env: dict[Var, bool] = {}
def read(v: Var) -> bool:
return env.get(v, False)
def write(x: Atom, b: bool) -> None:
if type(x) is Var:
env[x] = read(x) or b
new_eqns = []
foreach(write, jaxpr.outvars, used_outputs)
for eqn in jaxpr.eqns[::-1]:
used_outs = map(read, eqn.outvars)
rule = dce_rules.get(eqn.primitive, _default_dce_rule)
used_ins, new_eqn = rule(used_outs, eqn)
if new_eqn is not None:
new_eqns.append(new_eqn)
foreach(write, eqn.invars, used_ins)
used_inputs = map(read, jaxpr.invars)
used_inputs = map(op.or_, instantiate, used_inputs)
invars = [v for v, b in zip(jaxpr.invars, used_inputs) if b]
outvars = [v for v, b in zip(jaxpr.outvars, used_outputs) if b]
eqns = new_eqns[::-1]
jaxpr_effects = make_jaxpr_effects(jaxpr.constvars, invars, outvars, eqns)
dbg = core.DebugInfo(
jaxpr.debug_info.traced_for, jaxpr.debug_info.func_src_info,
jaxpr.debug_info.filter_arg_names(used_inputs),
jaxpr.debug_info.filter_result_paths(used_outputs))
new_jaxpr = jaxpr.replace(invars=invars, outvars=outvars, eqns=eqns,
effects=jaxpr_effects, debug_info=dbg)
config.enable_checks.value and core.check_jaxpr(new_jaxpr)
return new_jaxpr, used_inputs
DCERule = Callable[[list[bool], JaxprEqn],
tuple[list[bool], Union[JaxprEqn, None]]]
def _default_dce_rule(
used_outs: list[bool], eqn: JaxprEqn
) -> tuple[list[bool], JaxprEqn | None]:
if not any(used_outs) and not has_effects(eqn):
return [False] * len(eqn.invars), None
return [True] * len(eqn.invars), eqn
dce_rules: dict[Primitive, DCERule] = {}
def dce_jaxpr_call_rule(used_outputs: list[bool], eqn: JaxprEqn
) -> tuple[list[bool], JaxprEqn | None]:
if not any(used_outputs) and not has_effects(eqn):
return [False] * len(eqn.invars), None
new_jaxpr, used_inputs = dce_jaxpr(eqn.params['call_jaxpr'], used_outputs)
new_params = dict(eqn.params, call_jaxpr=new_jaxpr)
update_params = call_param_updaters.get(eqn.primitive)
if update_params:
new_params = update_params(new_params, used_inputs, 0)
if not any(used_inputs) and not any(used_outputs) and not new_jaxpr.effects:
return used_inputs, None
else:
new_eqn = new_jaxpr_eqn(
[v for v, used in zip(eqn.invars, used_inputs) if used],
[v for v, used in zip(eqn.outvars, used_outputs) if used],
eqn.primitive, new_params, new_jaxpr.effects, eqn.source_info, eqn.ctx)
return used_inputs, new_eqn
dce_rules[core.call_p] = dce_jaxpr_call_rule
@weakref_lru_cache
def _cached_closed_call_dce(jaxpr_, used_outputs: tuple[bool, ...]
) -> tuple[core.ClosedJaxpr, list[bool]]:
jaxpr, consts = jaxpr_.jaxpr, jaxpr_.consts
new_jaxpr, used_inputs = dce_jaxpr(jaxpr, used_outputs)
return core.ClosedJaxpr(new_jaxpr, consts), used_inputs
def dce_jaxpr_closed_call_rule(used_outputs: list[bool], eqn: JaxprEqn
) -> tuple[list[bool], JaxprEqn | None]:
# TODO(mattjj): de-duplicate with above rule?
if not any(used_outputs) and not has_effects(eqn):
return [False] * len(eqn.invars), None
jaxpr_ = eqn.params['call_jaxpr']
closed_jaxpr, used_inputs = _cached_closed_call_dce(jaxpr_, tuple(used_outputs))
effects = core.eqn_effects(closed_jaxpr)
new_params = dict(eqn.params, call_jaxpr=closed_jaxpr)
new_eqn = new_jaxpr_eqn(
[v for v, used in zip(eqn.invars, used_inputs) if used],
[v for v, used in zip(eqn.outvars, used_outputs) if used],
eqn.primitive, new_params, effects, eqn.source_info, eqn.ctx)
return used_inputs, new_eqn
dce_rules[core.closed_call_p] = dce_jaxpr_closed_call_rule
@weakref_lru_cache
def close_jaxpr(jaxpr: Jaxpr) -> ClosedJaxpr:
# The `jaxpr.replace()` is making a copy of the Jaxpr, without which
# the cache value would have a strong reference to the same Jaxpr as
# the key, and we would never gc the cache entry. This works because
# Jaxpr is hashed by id, and the cache entry is dead if the key is dead.
return ClosedJaxpr(jaxpr.replace(), ())
def move_invars_right(jaxpr: ClosedJaxpr, to_move: Sequence[bool]):
return _move_invars_right(jaxpr, tuple(to_move))
@weakref_lru_cache
def _move_invars_right(jaxpr: ClosedJaxpr, to_move: tuple[bool, ...]):
invars, rest = split_list(jaxpr.jaxpr.invars, [len(to_move)])
left_invars, right_invars = partition_list(to_move, invars)
new_invars = [*left_invars, *right_invars, *rest]
new_effs = _renumber_effects(
(*jaxpr.jaxpr.constvars, *new_invars),
(*jaxpr.jaxpr.constvars, *jaxpr.jaxpr.invars),
jaxpr.jaxpr.effects)
new_jaxpr = jaxpr.jaxpr.replace(invars=new_invars, effects=new_effs)
return jaxpr.replace(jaxpr=new_jaxpr)
def move_binders_to_front(closed_jaxpr: ClosedJaxpr, to_move: Sequence[bool]
) -> ClosedJaxpr:
"""Reorder `invars` by moving those indicated in `to_move` to the front."""
return _move_binders_to_front(closed_jaxpr, tuple(to_move))
@weakref_lru_cache
def _move_binders_to_front(jaxpr: ClosedJaxpr, to_move: tuple[bool, ...]
) -> ClosedJaxpr:
assert len(jaxpr.in_avals) == len(to_move)
constvars, invars = jaxpr.jaxpr.constvars, jaxpr.jaxpr.invars
new_invars = _move_to_front(invars, to_move)
new_effs = _renumber_effects(
(*constvars, *new_invars), (*constvars, *invars), jaxpr.jaxpr.effects)
if jaxpr.jaxpr.debug_info.arg_names is None:
new_arg_names = None
else:
new_arg_names = tuple(_move_to_front(jaxpr.jaxpr.debug_info.arg_names, to_move))
dbg = jaxpr.jaxpr.debug_info._replace(arg_names=new_arg_names)
new_jaxpr = jaxpr.jaxpr.replace(
constvars=constvars, invars=new_invars, effects=new_effs, debug_info=dbg)
return core.ClosedJaxpr(new_jaxpr, jaxpr.consts)
def _renumber_effects(new_vars, old_vars, effs):
newvar_idxs = {id(v): i for i, v in enumerate(new_vars)}
old_to_new = {i: newvar_idxs[id(v)] for i, v in enumerate(old_vars)}
return {e.replace(input_index=old_to_new[e.input_index])
if isinstance(e, effects.JaxprInputEffect) else e for e in effs}
def _move_to_front(lst: Sequence, to_move: Sequence[bool]) -> Sequence:
return ([elt for elt, move in zip(lst, to_move) if move] +
[elt for elt, move in zip(lst, to_move) if not move])
def move_binders_to_back(closed_jaxpr: ClosedJaxpr, to_move: Sequence[bool]
) -> ClosedJaxpr:
"""Reorder `invars` by moving those indicated in `to_move` to the back."""
return move_binders_to_front(closed_jaxpr, map(op.not_, to_move))
def move_outvars_to_back(jaxpr: ClosedJaxpr, to_move: Sequence[bool]) -> ClosedJaxpr:
return _move_outvars_to_back(jaxpr, tuple(to_move))
@weakref_lru_cache
def _move_outvars_to_back(jaxpr: core.ClosedJaxpr, to_move):
new_outvars = ([e for e, m in zip(jaxpr.jaxpr.outvars, to_move) if not m] +
[e for e, m in zip(jaxpr.jaxpr.outvars, to_move) if m])
return jaxpr.replace(jaxpr=jaxpr.jaxpr.replace(outvars=new_outvars))
|
Offloadable
|
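The `dce_jaxpr` docstring above describes a backward pass that keeps an equation only if one of its outputs is needed. A toy, jax-free analogue of that pass over `(invars, outvars)` pairs of variable names:

```python
# Toy analogue of the DCE backward pass; the default rule keeps an equation
# iff any of its outputs is still needed, then marks its inputs as needed.
def toy_dce(invars, eqns, used_outputs):
    needed = set(used_outputs)
    kept = []
    for eqn_invars, eqn_outvars in reversed(eqns):
        if needed & set(eqn_outvars):
            kept.append((eqn_invars, eqn_outvars))
            needed |= set(eqn_invars)
    kept.reverse()
    return kept, [v in needed for v in invars]

eqns = [(["a"], ["b"]), (["a"], ["c"]), (["b"], ["d"])]
kept, used_inputs = toy_dce(["a"], eqns, used_outputs=["d"])
assert kept == [(["a"], ["b"]), (["b"], ["d"])]  # the equation producing "c" is dead
assert used_inputs == [True]
```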
python
|
Netflix__metaflow
|
metaflow/plugins/env_escape/communication/socket_bytestream.py
|
{
"start": 230,
"end": 3572
}
|
class ____(ByteStream):
@classmethod
def connect(cls, host, port):
family, socktype, proto, _, sockaddr = socket.getaddrinfo(
host, port, socket.AF_INET, socket.SOCK_STREAM
)
try:
sock = socket.socket(family=family, type=socktype)
sock.settimeout(CONNECT_TIMEOUT)
__try_op__("connect", sock.connect, CONNECT_RETRY, sockaddr)
return cls(sock)
except BaseException:
sock.close()
raise
@classmethod
def unixconnect(cls, path):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
sock.settimeout(CONNECT_TIMEOUT)
__try_op__("unixconnect", sock.connect, CONNECT_RETRY, path)
return cls(sock)
except BaseException:
sock.close()
raise
def __init__(self, sock):
self._sock = sock
self._sock.settimeout(None) # Make the socket blocking
self._is_closed = False
def read(self, count, timeout=None):
result = bytearray(count)
with memoryview(result) as m:
while count > 0:
try:
if timeout is not None:
# Yes, technically should divide by RECV_COUNT...
self._sock.settimeout(timeout)
nbytes = __try_op__(
"receive",
self._sock.recv_into,
RECV_RETRY,
m,
min(count, MAX_MSG_SIZE),
)
# If we don't receive anything, we reached EOF
if nbytes == 0:
raise socket.error()
count -= nbytes
m = m[nbytes:]
except socket.timeout:
continue
except socket.error as e:
self.close()
raise EOFError(e)
return result
def write(self, data):
with memoryview(data) as m:
total_count = m.nbytes
while total_count > 0:
try:
nbytes = __try_op__(
"send", self._sock.send, WRITE_RETRY, m[:MAX_MSG_SIZE]
)
m = m[nbytes:]
total_count -= nbytes
except socket.timeout:
continue
except socket.error as e:
self.close()
raise EOFError(e)
def close(self):
if self._is_closed:
return
try:
self._sock.shutdown(socket.SHUT_RDWR)
except Exception:
pass
self._sock.close()
self._is_closed = True
@property
def is_closed(self):
return self._is_closed
def fileno(self):
try:
return self._sock.fileno()
except socket.error:
self.close()
exc = sys.exc_info()[1]
found_error = None
if hasattr(exc, "errno"):
found_error = exc.errno
else:
found_error = exc[0]
if found_error == errno.EBADF:
raise EOFError()
else:
raise
|
SocketByteStream
|
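Stand-alone illustration of the read-exactly-N-bytes loop used by the byte stream above, with the retry and timeout machinery left out:

```python
import socket

def read_exact(sock: socket.socket, count: int) -> bytearray:
    result = bytearray(count)
    with memoryview(result) as m:
        while count > 0:
            nbytes = sock.recv_into(m)      # fills the front of the remaining view
            if nbytes == 0:                 # peer closed the connection: EOF
                raise EOFError("connection closed early")
            count -= nbytes
            m = m[nbytes:]                  # advance past the bytes just received
    return result

a, b = socket.socketpair()
a.sendall(b"hello world")
assert read_exact(b, 11) == bytearray(b"hello world")
a.close()
b.close()
```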
python
|
jina-ai__jina
|
jina/proto/docarray_v1/pb/jina_pb2_grpc.py
|
{
"start": 15393,
"end": 16386
}
|
class ____(object):
"""*
jina gRPC service to expose information about running jina version and environment.
"""
def _status(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_JinaInfoRPCServicer_to_server(servicer, server):
rpc_method_handlers = {
'_status': grpc.unary_unary_rpc_method_handler(
servicer._status,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=jina__pb2.JinaInfoProto.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'jina.JinaInfoRPC', rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
|
JinaInfoRPCServicer
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/initsubclass1.py
|
{
"start": 1143,
"end": 1279
}
|
class ____(ClassG):
pass
# This should generate two errors because "a" is not present
# in the object.__init_subclass__ method.
|
ClassH
|
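The fixture above exercises class keywords that `object.__init_subclass__` cannot accept; a minimal runnable illustration of that mechanism:

```python
class Plugin:
    registry = {}

    def __init_subclass__(cls, *, key: str, **kwargs):
        super().__init_subclass__(**kwargs)
        cls.registry[key] = cls

class CsvPlugin(Plugin, key="csv"):      # fine: Plugin.__init_subclass__ consumes `key`
    pass

assert Plugin.registry["csv"] is CsvPlugin

try:
    class Broken(key="csv"):             # TypeError: object.__init_subclass__ takes no kwargs
        pass
except TypeError as exc:
    print(exc)
```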
python
|
nedbat__coveragepy
|
tests/test_mixins.py
|
{
"start": 2344,
"end": 3015
}
|
class ____(TempDirMixin, RestoreModulesMixin):
"""Tests of SysPathModulesMixin."""
@pytest.mark.parametrize("val", [17, 42])
def test_module_independence(self, val: int) -> None:
self.make_file("xyzzy.py", f"A = {val}")
import xyzzy # pylint: disable=import-error
assert xyzzy.A == val
def test_cleanup_and_reimport(self) -> None:
self.make_file("xyzzy.py", "A = 17")
xyzzy = import_local_file("xyzzy")
assert xyzzy.A == 17
self.clean_local_file_imports()
self.make_file("xyzzy.py", "A = 42")
xyzzy = import_local_file("xyzzy")
assert xyzzy.A == 42
|
RestoreModulessMixinTest
|
python
|
numba__numba
|
numba/core/target_extension.py
|
{
"start": 3992,
"end": 4051
}
|
class ____(GPU):
"""Mark the target as CUDA.
"""
|
CUDA
|
python
|
numpy__numpy
|
numpy/_core/tests/test_unicode.py
|
{
"start": 9202,
"end": 12071
}
|
class ____:
"""Check the byteorder of unicode arrays in round-trip conversions"""
def test_values0D(self):
# Check byteorder of 0-dimensional objects
ua = np.array(self.ucs_value * self.ulen, dtype=f'U{self.ulen}')
ua2 = ua.view(ua.dtype.newbyteorder())
# This changes the interpretation of the data region (but not the
# actual data), therefore the returned scalars are not
# the same (they are byte-swapped versions of each other).
assert_(ua[()] != ua2[()])
ua3 = ua2.view(ua2.dtype.newbyteorder())
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
def test_valuesSD(self):
# Check byteorder of single-dimensional objects
ua = np.array([self.ucs_value * self.ulen] * 2, dtype=f'U{self.ulen}')
ua2 = ua.view(ua.dtype.newbyteorder())
assert_((ua != ua2).all())
assert_(ua[-1] != ua2[-1])
ua3 = ua2.view(ua2.dtype.newbyteorder())
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
def test_valuesMD(self):
# Check byteorder of multi-dimensional objects
ua = np.array([[[self.ucs_value * self.ulen] * 2] * 3] * 4,
dtype=f'U{self.ulen}')
ua2 = ua.view(ua.dtype.newbyteorder())
assert_((ua != ua2).all())
assert_(ua[-1, -1, -1] != ua2[-1, -1, -1])
ua3 = ua2.view(ua2.dtype.newbyteorder())
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
def test_values_cast(self):
# Check byteorder when casting the array for a strided and
# contiguous array:
test1 = np.array([self.ucs_value * self.ulen] * 2, dtype=f'U{self.ulen}')
test2 = np.repeat(test1, 2)[::2]
for ua in (test1, test2):
ua2 = ua.astype(dtype=ua.dtype.newbyteorder())
assert_((ua == ua2).all())
assert_(ua[-1] == ua2[-1])
ua3 = ua2.astype(dtype=ua.dtype)
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
def test_values_updowncast(self):
# Check byteorder when casting the array to a longer and shorter
# string length for strided and contiguous arrays
test1 = np.array([self.ucs_value * self.ulen] * 2, dtype=f'U{self.ulen}')
test2 = np.repeat(test1, 2)[::2]
for ua in (test1, test2):
# Cast to a longer type with zero padding
longer_type = np.dtype(f'U{self.ulen + 1}').newbyteorder()
ua2 = ua.astype(dtype=longer_type)
assert_((ua == ua2).all())
assert_(ua[-1] == ua2[-1])
# Cast back again with truncating:
ua3 = ua2.astype(dtype=ua.dtype)
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
|
ByteorderValues
|
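A quick runnable version of the round trips the tests above assert, assuming NumPy is installed:

```python
import numpy as np

ua = np.array(["abc"], dtype="U3")
swapped = ua.view(ua.dtype.newbyteorder())   # view: reinterprets the raw UCS-4 data
assert (ua != swapped).all()
assert np.array_equal(ua, swapped.view(swapped.dtype.newbyteorder()))  # round trip

cast = ua.astype(ua.dtype.newbyteorder())    # astype: converts, so values are preserved
assert (ua == cast).all()
```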
python
|
automl__auto-sklearn
|
test/test_metalearning/pyMetaLearn/test_meta_base.py
|
{
"start": 178,
"end": 2253
}
|
class ____(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.cwd = os.getcwd()
data_dir = os.path.dirname(__file__)
data_dir = os.path.join(data_dir, "test_meta_base_data")
os.chdir(data_dir)
pipeline = autosklearn.pipeline.classification.SimpleClassificationPipeline()
cs = pipeline.get_hyperparameter_search_space()
self.logger = logging.getLogger()
self.base = MetaBase(cs, data_dir, logger=self.logger)
def tearDown(self):
os.chdir(self.cwd)
def test_get_all_runs(self):
runs = self.base.get_all_runs()
self.assertIsInstance(runs, pd.DataFrame)
# TODO update this ASAP
self.assertEqual((125, 125), runs.shape)
def test_get_runs(self):
runs = self.base.get_runs("233")
# TODO update this ASAP
self.assertEqual(125, len(runs))
self.assertIsInstance(runs, pd.Series)
def test_get_metafeatures_single_dataset(self):
mf = self.base.get_metafeatures("233")
self.assertIsInstance(mf, pd.Series)
self.assertEqual(mf.name, "233")
self.assertEqual(mf.loc["NumberOfInstances"], 2142.0)
def test_get_metafeatures_single_feature(self):
mf = self.base.get_metafeatures(features="NumberOfInstances")
self.assertIsInstance(mf, pd.Series)
self.assertEqual(mf.shape, (132,))
def test_get_metafeatures_single_dataset_and_single_feature(self):
mf = self.base.get_metafeatures("233", features="NumberOfInstances")
self.assertEqual(mf.shape, ())
def test_get_metafeatures_multiple_datasets(self):
mf = self.base.get_metafeatures(["233", "236"])
self.assertIsInstance(mf, pd.DataFrame)
self.assertEqual(mf.shape, (2, 46))
def test_get_metafeatures_multiple_features(self):
mf = self.base.get_metafeatures(
features=["NumberOfInstances", "NumberOfClasses"]
)
self.assertIsInstance(mf, pd.DataFrame)
self.assertEqual(mf.shape, (132, 2))
|
MetaBaseTest
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-aws/dagster_aws_tests/s3_tests/test_io_manager.py
|
{
"start": 826,
"end": 6868
}
|
class ____(ConfigurableResource, IAttachDifferentObjectToOpContext):
def get_client(self) -> Any:
return construct_s3_client(max_attempts=5)
def get_object_to_set_on_execution_context(self) -> Any:
return self.get_client()
@resource
def s3_test_resource(_):
return construct_s3_client(max_attempts=5)
@pytest.fixture(name="s3_and_io_manager", params=[True, False])
def s3_and_io_manager_fixture(
request,
) -> tuple[Any, Callable[[Any], Any]]:
if request.param:
return s3_test_resource, lambda _: s3_pickle_io_manager
else:
return (
S3TestResource(),
lambda s3: S3PickleIOManager.configure_at_launch(s3_resource=s3),
)
def define_inty_job(s3_resource, s3_io_manager_builder):
@op(out=Out(Int))
def return_one():
return 1
@op(
ins={"num": In(Int)},
out=Out(Int),
)
def add_one(num):
return num + 1
@job(
resource_defs={
"io_manager": s3_io_manager_builder(s3_resource),
"s3": s3_resource,
}
)
def basic_external_plan_execution():
add_one(return_one())
return basic_external_plan_execution
def test_s3_pickle_io_manager_execution(mock_s3_bucket, s3_and_io_manager):
assert not len(list(mock_s3_bucket.objects.all()))
s3_resource, s3_io_manager_builder = s3_and_io_manager
inty_job = define_inty_job(s3_resource, s3_io_manager_builder)
run_config = {"resources": {"io_manager": {"config": {"s3_bucket": mock_s3_bucket.name}}}}
result = inty_job.execute_in_process(run_config)
assert result.output_for_node("return_one") == 1
assert result.output_for_node("add_one") == 2
assert len(list(mock_s3_bucket.objects.all())) == 2
def define_multiple_output_job():
@op(
out={
"foo": Out(Int),
"foobar": Out(Int),
}
)
def return_two_outputs():
yield Output(10, "foobar")
yield Output(5, "foo")
@job(resource_defs={"io_manager": s3_pickle_io_manager, "s3": s3_test_resource})
def output_prefix_execution_plan():
return_two_outputs()
return output_prefix_execution_plan
def test_s3_pickle_io_manager_prefix(mock_s3_bucket):
assert not len(list(mock_s3_bucket.objects.all()))
prefixy_job = define_multiple_output_job()
run_config = {"resources": {"io_manager": {"config": {"s3_bucket": mock_s3_bucket.name}}}}
result = prefixy_job.execute_in_process(run_config)
assert result.output_for_node("return_two_outputs", "foo") == 5
assert result.output_for_node("return_two_outputs", "foobar") == 10
assert len(list(mock_s3_bucket.objects.all())) == 2
def define_assets_job(bucket):
@op
def first_op(first_input):
assert first_input == 4
return first_input * 2
@op
def second_op(second_input):
assert second_input == 8
return second_input + 3
source1 = SourceAsset("source1", partitions_def=StaticPartitionsDefinition(["foo", "bar"]))
@asset
def asset1(source1):
return source1["foo"] + source1["bar"]
@asset
def asset2(asset1):
assert asset1 == 3
return asset1 + 1
@graph(ins={"asset2": GraphIn()}, out={"asset3": GraphOut()})
def graph_asset(asset2):
return second_op(first_op(asset2))
@asset(partitions_def=StaticPartitionsDefinition(["apple", "orange"]))
def partitioned():
return 8
graph_asset_def = AssetsDefinition.from_graph(graph_asset)
target_assets = [asset1, asset2, graph_asset_def, partitioned]
return Definitions(
assets=[*target_assets, source1],
jobs=[define_asset_job("assets", target_assets)],
resources={
"io_manager": s3_pickle_io_manager.configured({"s3_bucket": bucket}),
"s3": s3_test_resource,
},
).resolve_job_def("assets")
def test_s3_pickle_io_manager_asset_execution(mock_s3_bucket):
assert not len(list(mock_s3_bucket.objects.all()))
inty_job = define_assets_job(mock_s3_bucket.name)
# pickled_source1_foo = pickle.dumps(1)
mock_s3_bucket.put_object(Key="dagster/source1/foo", Body=pickle.dumps(1))
# pickled_source1_bar = pickle.dumps(2)
mock_s3_bucket.put_object(Key="dagster/source1/bar", Body=pickle.dumps(2))
result = inty_job.execute_in_process(partition_key="apple")
assert result.output_for_node("asset1") == 3
assert result.output_for_node("asset2") == 4
assert result.output_for_node("graph_asset.first_op") == 8
assert result.output_for_node("graph_asset.second_op") == 11
objects = list(mock_s3_bucket.objects.all())
assert len(objects) == 7
assert {(o.bucket_name, o.key) for o in objects} == {
("test-bucket", "dagster/source1/bar"),
("test-bucket", "dagster/source1/foo"),
("test-bucket", "dagster/asset1"),
("test-bucket", "dagster/asset2"),
("test-bucket", "dagster/asset3"),
("test-bucket", "dagster/partitioned/apple"),
(
"test-bucket",
"/".join(["dagster", "storage", result.run_id, "graph_asset.first_op", "result"]),
),
}
# re-execution does not cause issues, overwrites the buckets
result2 = inty_job.execute_in_process(partition_key="apple")
objects = list(mock_s3_bucket.objects.all())
assert len(objects) == 8
assert {(o.bucket_name, o.key) for o in objects} == {
("test-bucket", "dagster/source1/bar"),
("test-bucket", "dagster/source1/foo"),
("test-bucket", "dagster/asset1"),
("test-bucket", "dagster/asset2"),
("test-bucket", "dagster/asset3"),
("test-bucket", "dagster/partitioned/apple"),
(
"test-bucket",
"/".join(["dagster", "storage", result.run_id, "graph_asset.first_op", "result"]),
),
(
"test-bucket",
"/".join(["dagster", "storage", result2.run_id, "graph_asset.first_op", "result"]),
),
}
|
S3TestResource
|
python
|
pytorch__pytorch
|
torch/testing/_internal/distributed/ddp_under_dist_autograd_test.py
|
{
"start": 17683,
"end": 24047
}
|
class ____(CommonDdpComparisonTest):
def _run_test_ddp_comparision(self, simulate_uneven_inputs=False):
gLogger.info("Running trainer rank: %s", self.rank)
# Each trainer uses a different random seed. Otherwise, they are going
# to have exactly the same initial model parameters, input, and
# therefore grads. That means the grads will be the same before and
# after DDP's all-reduce.
torch.manual_seed(self.rank)
dist.init_process_group(
backend="gloo",
# Postfix file_name with "pg" since file_name is also used by RPC agent
init_method=INIT_METHOD_TEMPLATE.format(file_name=f"{self.file_name}_pg"),
world_size=self.world_size,
rank=self.rank,
)
net = nn.Linear(2, 3)
ddp_net = DistributedDataParallel(net)
# Odd ranks join early if simulate_uneven_inputs.
num_inputs = 1
if simulate_uneven_inputs:
if self.rank % 2 == 0:
num_inputs += 2
inputs_list = [torch.rand((3, 2)) for _ in range(num_inputs)]
if simulate_uneven_inputs:
gLogger.info(
"Rank %s training with %s inputs.", self.rank, len(inputs_list)
)
# Use distributed autograd. The gradients will be in RPC context map.
grads_dict = {}
with ddp_net.join(simulate_uneven_inputs):
for i, inputs in enumerate(inputs_list):
with dist_autograd.context() as context_id:
loss = ddp_net(inputs).norm()
dist_autograd.backward(context_id, [loss])
grads_dict = dist_autograd.get_gradients(context_id)
gLogger.info("Trainer #%s got grad dict: %s", self.rank, grads_dict)
# Use local autograd. The gradients will be in each variable's '.grad'.
ddp_net.zero_grad()
loss = ddp_net(inputs).norm()
loss.backward()
# The gradients should be the same
for param in net.parameters():
self.assertTrue(
param in grads_dict,
msg=f"Param {param} is not in dist_auto grad dict {grads_dict} for iteration {i}",
)
self.assertEqual(
grads_dict[param],
param.grad,
msg=f"The grads for param {param} are different under local "
f"and dist autograd: {param.grad} \n---\n {grads_dict[param]} for iteration {i}",
)
dist.destroy_process_group()
@requires_gloo()
@dist_init
def test_ddp_comparison(self):
self._run_test_ddp_comparision()
@requires_gloo()
@dist_init
def test_ddp_comparison_uneven_inputs(self):
# test with simulating uneven inputs in DDP
self._run_test_ddp_comparision(simulate_uneven_inputs=True)
@requires_gloo()
@dist_init
def test_ddp_dist_autograd_sparse_grads(self):
# Each trainer uses a different random seed. Otherwise, they are going
# to have exactly the same initial model parameters, input, and
# therefore grads. That means the grads will be the same before and
# after DDP's all-reduce.
torch.manual_seed(self.rank)
dist.init_process_group(
backend="gloo",
init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
world_size=self.world_size,
rank=self.rank,
)
model = nn.EmbeddingBag(10, 3, sparse=True)
ddp_model = DistributedDataParallel(model)
# Different inputs for each
input = torch.LongTensor(10).random_(0, 10)
offsets = torch.LongTensor([0, 4])
# Run local.
loss = ddp_model(input, offsets).sum()
loss.backward()
with dist_autograd.context() as context_id:
loss = ddp_model(input, offsets).sum()
dist_autograd.backward(context_id, [loss])
grads_dict = dist_autograd.get_gradients(context_id)
self.assertEqual(1, len(grads_dict))
self.assertEqual(model.weight.grad, grads_dict[model.weight])
@requires_gloo()
@dist_init
def test_ddp_dist_autograd_local_vs_remote(self):
# Each trainer uses a different random seed. Otherwise, they are going
# to have exactly the same initial model parameters, input, and
# therefore grads. That means the grads will be the same before and
# after DDP's all-reduce.
torch.manual_seed(self.rank)
dist.init_process_group(
backend="gloo",
init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
world_size=self.world_size,
rank=self.rank,
)
# Use two different remote device input string, w/ and w/o the default
# device string "cpu", respectively.
for remote_device in ["worker0/cpu", "worker0"]:
remote_layer1 = RemoteModule(
remote_device=remote_device, module_cls=nn.Linear, args=(10, 5, False)
)
layer1 = nn.Linear(10, 5, False)
# Start with the same parameters for remote and local
layer1.weight = remote_layer1.module_rref.to_here().weight
# Run local case.
layer2 = nn.Linear(5, 1)
inputs = torch.rand((10, 10))
ddp_model = DistributedDataParallel(layer2)
loss = ddp_model(layer1(inputs)).sum()
loss.backward()
# Run remote case.
with dist_autograd.context() as context_id:
loss = ddp_model(remote_layer1(inputs)).sum()
dist_autograd.backward(context_id, [loss])
grads_dict = dist_autograd.get_gradients(context_id)
dist.barrier()
self.assertEqual(layer2.weight.grad, grads_dict[layer2.weight])
self.assertEqual(
layer1.weight.grad,
rpc.rpc_sync(
"worker0",
CommonDdpComparisonTest.get_remote_grads,
args=(remote_layer1.module_rref, context_id),
),
)
|
DdpComparisonTest
|
python
|
bokeh__bokeh
|
tests/unit/bokeh/server/test_tornado__server.py
|
{
"start": 11185,
"end": 12706
}
|
class ____:
def test_app_static_path(self):
app = Application()
app._static_path = "foo"
result = bst.create_static_handler("/prefix", "/key", app)
assert len(result) == 3
assert result[0] == "/prefix/key/static/(.*)"
assert result[1] == StaticFileHandler
assert result[2] == {"path" : app.static_path}
result = bst.create_static_handler("/prefix", "/", app)
assert len(result) == 3
assert result[0] == "/prefix/static/(.*)"
assert result[1] == StaticFileHandler
assert result[2] == {"path" : app.static_path}
def test_no_app_static_path(self):
app = Application()
app._static_path = None
result = bst.create_static_handler("/prefix", "/key", app)
assert len(result) == 3
assert result[0] == "/prefix/key/static/(.*)"
assert result[1] == StaticHandler
assert result[2] == {}
result = bst.create_static_handler("/prefix", "/", app)
assert len(result) == 3
assert result[0] == "/prefix/static/(.*)"
assert result[1] == StaticHandler
assert result[2] == {}
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
Test_create_static_handler
|
python
|
huggingface__transformers
|
src/transformers/models/vitpose_backbone/modeling_vitpose_backbone.py
|
{
"start": 9355,
"end": 10461
}
|
class ____(nn.Module):
def __init__(self, config: VitPoseBackboneConfig):
super().__init__()
in_features = out_features = config.hidden_size
hidden_features = int(config.hidden_size * config.mlp_ratio)
num_experts = config.num_experts
part_features = config.part_features
self.part_features = part_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = ACT2FN[config.hidden_act]
self.fc2 = nn.Linear(hidden_features, out_features - part_features)
self.num_experts = num_experts
self.experts = VitPoseNaiveMoe(config)
def forward(self, hidden_state: torch.Tensor, indices: torch.Tensor) -> torch.Tensor:
hidden_state = self.fc1(hidden_state)
hidden_state = self.act(hidden_state)
shared_hidden_state = self.fc2(hidden_state)
indices = indices.view(-1, 1, 1)
expert_hidden_state = self.experts(hidden_state, indices)
hidden_state = torch.cat([shared_hidden_state, expert_hidden_state], dim=-1)
return hidden_state
|
VitPoseBackboneMoeMLP
|
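Shape-level sketch of the shared/expert split performed by the MoE MLP above, assuming PyTorch is available; the expert branch here is a single stand-in `nn.Linear`, since `VitPoseNaiveMoe` is not part of the snippet:

```python
import torch
from torch import nn

hidden_size, mlp_ratio, part_features = 192, 4, 24
hidden_features = hidden_size * mlp_ratio

fc1 = nn.Linear(hidden_size, hidden_features)
fc2 = nn.Linear(hidden_features, hidden_size - part_features)  # shared features
expert = nn.Linear(hidden_features, part_features)             # stand-in for the experts

x = torch.randn(2, 17, hidden_size)                            # (batch, tokens, hidden)
h = torch.nn.functional.gelu(fc1(x))
out = torch.cat([fc2(h), expert(h)], dim=-1)
assert out.shape == (2, 17, hidden_size)                       # split re-assembles the width
```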
python
|
jazzband__django-simple-history
|
simple_history/tests/models.py
|
{
"start": 21795,
"end": 21975
}
|
class ____(models.Model):
name = models.CharField(max_length=15, unique=True)
history = HistoricalRecords(custom_model_name=lambda x: f"Audit{x}")
|
OverrideModelNameAsCallable
|
python
|
facebookresearch__faiss
|
faiss/gpu/test/test_index_cpu_to_gpu.py
|
{
"start": 230,
"end": 3934
}
|
class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.res = faiss.StandardGpuResources()
def create_index(self, factory_string):
dimension = 128
n = 2500
db_vectors = np.random.random((n, dimension)).astype('float32')
index = faiss.index_factory(dimension, factory_string)
index.train(db_vectors)
if factory_string.startswith("IDMap"):
index.add_with_ids(db_vectors, np.arange(n))
else:
index.add(db_vectors)
return index
def create_and_clone(self, factory_string,
allowCpuCoarseQuantizer=None,
use_cuvs=None):
idx = self.create_index(factory_string)
config = faiss.GpuClonerOptions()
if allowCpuCoarseQuantizer is not None:
config.allowCpuCoarseQuantizer = allowCpuCoarseQuantizer
if use_cuvs is not None:
config.use_cuvs = use_cuvs
faiss.index_cpu_to_gpu(self.res, 0, idx, config)
def verify_throws_not_implemented_exception(self, factory_string):
try:
self.create_and_clone(factory_string)
except Exception as e:
if "not implemented" not in str(e):
self.fail("Expected an exception but no exception was "
"thrown for factory_string: %s." % factory_string)
def verify_clones_successfully(self, factory_string,
allowCpuCoarseQuantizer=None,
use_cuvs=None):
try:
self.create_and_clone(
factory_string,
allowCpuCoarseQuantizer=allowCpuCoarseQuantizer,
use_cuvs=use_cuvs)
except Exception as e:
self.fail("Unexpected exception thrown factory_string: "
"%s; error message: %s." % (factory_string, str(e)))
def test_not_implemented_indices(self):
self.verify_throws_not_implemented_exception("PQ16")
self.verify_throws_not_implemented_exception("LSHrt")
self.verify_throws_not_implemented_exception("HNSW")
self.verify_throws_not_implemented_exception("HNSW,PQ16")
self.verify_throws_not_implemented_exception("IDMap,PQ16")
self.verify_throws_not_implemented_exception("IVF256,ITQ64,SH1.2")
def test_implemented_indices(self):
self.verify_clones_successfully("Flat")
self.verify_clones_successfully("IVF1,Flat")
self.verify_clones_successfully("IVF32,PQ8")
self.verify_clones_successfully("IDMap,Flat")
self.verify_clones_successfully("PCA12,IVF32,Flat")
self.verify_clones_successfully("PCA32,IVF32,PQ8")
self.verify_clones_successfully("PCA32,IVF32,PQ8np")
# set use_cuvs to false, these index types are not supported on cuVS
self.verify_clones_successfully("IVF32,SQ8", use_cuvs=False)
self.verify_clones_successfully(
"PCA32,IVF32,SQ8", use_cuvs=False)
def test_with_flag(self):
self.verify_clones_successfully("IVF32_HNSW,Flat",
allowCpuCoarseQuantizer=True)
self.verify_clones_successfully("IVF256(PQ2x4fs),Flat",
allowCpuCoarseQuantizer=True)
def test_with_flag_set_to_false(self):
try:
self.verify_clones_successfully("IVF32_HNSW,Flat",
allowCpuCoarseQuantizer=False)
except Exception as e:
if "set the flag to true to allow the CPU fallback" not in str(e):
self.fail("Unexepected error message thrown: %s." % str(e))
|
TestMoveToGpu
|
python
|
django__django
|
django/contrib/gis/geos/prototypes/geom.py
|
{
"start": 1062,
"end": 1249
}
|
class ____(GEOSFuncFactory):
"Argument is a geometry, return type is an integer."
argtypes = [GEOM_PTR]
restype = c_int
errcheck = staticmethod(check_minus_one)
|
IntFromGeom
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/dml.py
|
{
"start": 64520,
"end": 68695
}
|
class ____(
DMLWhereBase, UpdateBase, HasSyntaxExtensions[Literal["post_criteria"]]
):
"""Represent a DELETE construct.
The :class:`_expression.Delete` object is created using the
:func:`_expression.delete()` function.
Available extension points:
* ``post_criteria``: applies additional logic after the ``WHERE`` clause.
"""
__visit_name__ = "delete"
is_delete = True
_traverse_internals = (
[
("table", InternalTraversal.dp_clauseelement),
("_where_criteria", InternalTraversal.dp_clauseelement_tuple),
("_returning", InternalTraversal.dp_clauseelement_tuple),
("_hints", InternalTraversal.dp_table_hint_list),
("_post_criteria_clause", InternalTraversal.dp_clauseelement),
]
+ HasPrefixes._has_prefixes_traverse_internals
+ DialectKWArgs._dialect_kwargs_traverse_internals
+ ExecutableStatement._executable_traverse_internals
+ HasCTE._has_ctes_traverse_internals
)
_position_map = util.immutabledict(
{"post_criteria": "_post_criteria_clause"}
)
def __init__(self, table: _DMLTableArgument):
self.table = coercions.expect(
roles.DMLTableRole, table, apply_propagate_attrs=self
)
def _apply_syntax_extension_to_self(
self, extension: SyntaxExtension
) -> None:
extension.apply_to_delete(self)
if TYPE_CHECKING:
# START OVERLOADED FUNCTIONS self.returning ReturningDelete 1-8
# code within this block is **programmatically,
# statically generated** by tools/generate_tuple_map_overloads.py
@overload
def returning(self, __ent0: _TCCA[_T0], /) -> ReturningDelete[_T0]: ...
@overload
def returning(
self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1], /
) -> ReturningDelete[_T0, _T1]: ...
@overload
def returning(
self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1], __ent2: _TCCA[_T2], /
) -> ReturningDelete[_T0, _T1, _T2]: ...
@overload
def returning(
self,
__ent0: _TCCA[_T0],
__ent1: _TCCA[_T1],
__ent2: _TCCA[_T2],
__ent3: _TCCA[_T3],
/,
) -> ReturningDelete[_T0, _T1, _T2, _T3]: ...
@overload
def returning(
self,
__ent0: _TCCA[_T0],
__ent1: _TCCA[_T1],
__ent2: _TCCA[_T2],
__ent3: _TCCA[_T3],
__ent4: _TCCA[_T4],
/,
) -> ReturningDelete[_T0, _T1, _T2, _T3, _T4]: ...
@overload
def returning(
self,
__ent0: _TCCA[_T0],
__ent1: _TCCA[_T1],
__ent2: _TCCA[_T2],
__ent3: _TCCA[_T3],
__ent4: _TCCA[_T4],
__ent5: _TCCA[_T5],
/,
) -> ReturningDelete[_T0, _T1, _T2, _T3, _T4, _T5]: ...
@overload
def returning(
self,
__ent0: _TCCA[_T0],
__ent1: _TCCA[_T1],
__ent2: _TCCA[_T2],
__ent3: _TCCA[_T3],
__ent4: _TCCA[_T4],
__ent5: _TCCA[_T5],
__ent6: _TCCA[_T6],
/,
) -> ReturningDelete[_T0, _T1, _T2, _T3, _T4, _T5, _T6]: ...
@overload
def returning(
self,
__ent0: _TCCA[_T0],
__ent1: _TCCA[_T1],
__ent2: _TCCA[_T2],
__ent3: _TCCA[_T3],
__ent4: _TCCA[_T4],
__ent5: _TCCA[_T5],
__ent6: _TCCA[_T6],
__ent7: _TCCA[_T7],
/,
*entities: _ColumnsClauseArgument[Any],
) -> ReturningDelete[
_T0, _T1, _T2, _T3, _T4, _T5, _T6, _T7, Unpack[TupleAny]
]: ...
# END OVERLOADED FUNCTIONS self.returning
@overload
def returning(
self, *cols: _ColumnsClauseArgument[Any], **__kw: Any
) -> ReturningDelete[Unpack[TupleAny]]: ...
def returning(
self, *cols: _ColumnsClauseArgument[Any], **__kw: Any
) -> ReturningDelete[Unpack[TupleAny]]: ...
|
Delete
|
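A hedged usage sketch for the `Delete` construct documented above, assuming SQLAlchemy 2.x is installed; `users` is an ad-hoc lightweight table built only for the demo:

```python
from sqlalchemy import column, delete, table

users = table("users", column("id"), column("name"))

stmt = delete(users).where(users.c.name == "spongebob").returning(users.c.id)
print(stmt)
# roughly: DELETE FROM users WHERE users.name = :name_1 RETURNING users.id
```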
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pyupgrade/UP008.py
|
{
"start": 6044,
"end": 6167
}
|
class ____(ParentI):
def f(self):
__class__ = None
super
builtins.super(ChildI7, self).f()
|
ChildI7
|
python
|
sphinx-doc__sphinx
|
tests/test_markup/test_markup.py
|
{
"start": 2583,
"end": 29521
}
|
class ____(LaTeXTranslator, ForgivingTranslator):
pass
def rst_to_html(rst: str, *, app: SphinxTestApp) -> str:
document = parse_rst(rst, env=app.env)
html_translator = ForgivingHTMLTranslator(document, app.builder)
document.walkabout(html_translator)
html_translated = ''.join(html_translator.fragment).strip()
return html_translated
def rst_to_latex(rst: str, *, app: SphinxTestApp) -> str:
document = parse_rst(rst, env=app.env)
app.builder = LaTeXBuilder(app, app.env)
app.builder.init()
theme = app.builder.themes.get('manual')
latex_translator = ForgivingLaTeXTranslator(document, app.builder, theme)
latex_translator.first_document = -1 # don't write \begin{document}
document.walkabout(latex_translator)
latex_translated = ''.join(latex_translator.body).strip()
return latex_translated
@pytest.mark.parametrize(
('rst', 'html_expected', 'latex_expected'),
[
(
# cve role
':cve:`2020-10735`',
(
'<p><span class="target" id="index-0"></span><a class="cve reference external" '
'href="https://www.cve.org/CVERecord?id=CVE-2020-10735">'
'<strong>CVE 2020-10735</strong></a></p>'
),
(
'\\sphinxAtStartPar\n'
'\\index{Common Vulnerabilities and Exposures@\\spxentry{Common Vulnerabilities and Exposures}'
'!CVE 2020\\sphinxhyphen{}10735@\\spxentry{CVE 2020\\sphinxhyphen{}10735}}'
'\\sphinxhref{https://www.cve.org/CVERecord?id=CVE-2020-10735}'
'{\\sphinxstylestrong{CVE 2020\\sphinxhyphen{}10735}}'
),
),
(
# cve role with anchor
':cve:`2020-10735#id1`',
(
'<p><span class="target" id="index-0"></span><a class="cve reference external" '
'href="https://www.cve.org/CVERecord?id=CVE-2020-10735#id1">'
'<strong>CVE 2020-10735#id1</strong></a></p>'
),
(
'\\sphinxAtStartPar\n'
'\\index{Common Vulnerabilities and Exposures@\\spxentry{Common Vulnerabilities and Exposures}'
'!CVE 2020\\sphinxhyphen{}10735\\#id1@\\spxentry{CVE 2020\\sphinxhyphen{}10735\\#id1}}'
'\\sphinxhref{https://www.cve.org/CVERecord?id=CVE-2020-10735\\#id1}'
'{\\sphinxstylestrong{CVE 2020\\sphinxhyphen{}10735\\#id1}}'
),
),
(
# cwe role
':cwe:`787`',
(
'<p><span class="target" id="index-0"></span><a class="cwe reference external" '
'href="https://cwe.mitre.org/data/definitions/787.html">'
'<strong>CWE 787</strong></a></p>'
),
(
'\\sphinxAtStartPar\n'
'\\index{Common Weakness Enumeration@\\spxentry{Common Weakness Enumeration}'
'!CWE 787@\\spxentry{CWE 787}}'
'\\sphinxhref{https://cwe.mitre.org/data/definitions/787.html}'
'{\\sphinxstylestrong{CWE 787}}'
),
),
(
# cwe role with anchor
':cwe:`787#id1`',
(
'<p><span class="target" id="index-0"></span><a class="cwe reference external" '
'href="https://cwe.mitre.org/data/definitions/787.html#id1">'
'<strong>CWE 787#id1</strong></a></p>'
),
(
'\\sphinxAtStartPar\n'
'\\index{Common Weakness Enumeration@\\spxentry{Common Weakness Enumeration}'
'!CWE 787\\#id1@\\spxentry{CWE 787\\#id1}}'
'\\sphinxhref{https://cwe.mitre.org/data/definitions/787.html\\#id1}'
'{\\sphinxstylestrong{CWE 787\\#id1}}'
),
),
(
# pep role
':pep:`8`',
(
'<p><span class="target" id="index-0"></span><a class="pep reference external" '
'href="https://peps.python.org/pep-0008/"><strong>PEP 8</strong></a></p>'
),
(
'\\sphinxAtStartPar\n'
'\\index{Python Enhancement Proposals@\\spxentry{Python Enhancement Proposals}'
'!PEP 8@\\spxentry{PEP 8}}\\sphinxhref{https://peps.python.org/pep-0008/}'
'{\\sphinxstylestrong{PEP 8}}'
),
),
(
# pep role with anchor
':pep:`8#id1`',
(
'<p><span class="target" id="index-0"></span><a class="pep reference external" '
'href="https://peps.python.org/pep-0008/#id1">'
'<strong>PEP 8#id1</strong></a></p>'
),
(
'\\sphinxAtStartPar\n'
'\\index{Python Enhancement Proposals@\\spxentry{Python Enhancement Proposals}'
'!PEP 8\\#id1@\\spxentry{PEP 8\\#id1}}\\sphinxhref'
'{https://peps.python.org/pep-0008/\\#id1}'
'{\\sphinxstylestrong{PEP 8\\#id1}}'
),
),
(
# rfc role
':rfc:`2324`',
(
'<p><span class="target" id="index-0"></span><a class="rfc reference external" '
'href="https://datatracker.ietf.org/doc/html/rfc2324.html"><strong>RFC 2324</strong></a></p>'
),
(
'\\sphinxAtStartPar\n'
'\\index{RFC@\\spxentry{RFC}!RFC 2324@\\spxentry{RFC 2324}}'
'\\sphinxhref{https://datatracker.ietf.org/doc/html/rfc2324.html}'
'{\\sphinxstylestrong{RFC 2324}}'
),
),
(
# rfc role with anchor
':rfc:`2324#section-1`',
(
'<p><span class="target" id="index-0"></span><a class="rfc reference external" '
'href="https://datatracker.ietf.org/doc/html/rfc2324.html#section-1">'
'<strong>RFC 2324 Section 1</strong></a></p>'
),
(
'\\sphinxAtStartPar\n'
'\\index{RFC@\\spxentry{RFC}!RFC 2324 Section 1@\\spxentry{RFC 2324 Section 1}}'
'\\sphinxhref{https://datatracker.ietf.org/doc/html/rfc2324.html\\#section-1}'
'{\\sphinxstylestrong{RFC 2324 Section 1}}'
),
),
(
# interpolation of arrows in menuselection
':menuselection:`a --> b`',
'<p><span class="menuselection">a \N{TRIANGULAR BULLET} b</span></p>',
'\\sphinxAtStartPar\n\\sphinxmenuselection{a \\(\\rightarrow\\) b}',
),
(
# interpolation of ampersands in menuselection
':menuselection:`&Foo -&&- &Bar`',
(
'<p><span class="menuselection"><span class="accelerator">F</span>oo '
'-&- <span class="accelerator">B</span>ar</span></p>'
),
(
'\\sphinxAtStartPar\n'
r'\sphinxmenuselection{\sphinxaccelerator{F}oo \sphinxhyphen{}'
r'\&\sphinxhyphen{} \sphinxaccelerator{B}ar}'
),
),
(
# interpolation of ampersands in guilabel
':guilabel:`&Foo -&&- &Bar`',
(
'<p><span class="guilabel"><span class="accelerator">F</span>oo '
'-&- <span class="accelerator">B</span>ar</span></p>'
),
(
'\\sphinxAtStartPar\n'
r'\sphinxguilabel{\sphinxaccelerator{F}oo \sphinxhyphen{}\&\sphinxhyphen{} \sphinxaccelerator{B}ar}'
),
),
(
# no ampersands in guilabel
':guilabel:`Foo`',
'<p><span class="guilabel">Foo</span></p>',
'\\sphinxAtStartPar\n\\sphinxguilabel{Foo}',
),
(
# kbd role
':kbd:`space`',
'<p><kbd class="kbd docutils literal notranslate">space</kbd></p>',
'\\sphinxAtStartPar\n\\sphinxkeyboard{\\sphinxupquote{space}}',
),
(
# kbd role
':kbd:`Control+X`',
(
'<p>'
'<kbd class="kbd docutils literal notranslate">Control</kbd>'
'+'
'<kbd class="kbd docutils literal notranslate">X</kbd>'
'</p>'
),
(
'\\sphinxAtStartPar\n'
'\\sphinxkeyboard{\\sphinxupquote{Control}}'
'+'
'\\sphinxkeyboard{\\sphinxupquote{X}}'
),
),
(
# kbd role
':kbd:`Alt+^`',
(
'<p>'
'<kbd class="kbd docutils literal notranslate">Alt</kbd>'
'+'
'<kbd class="kbd docutils literal notranslate">^</kbd>'
'</p>'
),
(
'\\sphinxAtStartPar\n'
'\\sphinxkeyboard{\\sphinxupquote{Alt}}'
'+'
'\\sphinxkeyboard{\\sphinxupquote{\\textasciicircum{}}}'
),
),
(
# kbd role
':kbd:`M-x M-s`',
(
'<p>'
'<kbd class="kbd docutils literal notranslate">M</kbd>'
'-'
'<kbd class="kbd docutils literal notranslate">x</kbd>'
' '
'<kbd class="kbd docutils literal notranslate">M</kbd>'
'-'
'<kbd class="kbd docutils literal notranslate">s</kbd>'
'</p>'
),
(
'\\sphinxAtStartPar\n'
'\\sphinxkeyboard{\\sphinxupquote{M}}'
'\\sphinxhyphen{}'
'\\sphinxkeyboard{\\sphinxupquote{x}}'
' '
'\\sphinxkeyboard{\\sphinxupquote{M}}'
'\\sphinxhyphen{}'
'\\sphinxkeyboard{\\sphinxupquote{s}}'
),
),
(
# kbd role
':kbd:`-`',
'<p><kbd class="kbd docutils literal notranslate">-</kbd></p>',
'\\sphinxAtStartPar\n\\sphinxkeyboard{\\sphinxupquote{\\sphinxhyphen{}}}',
),
(
# kbd role
':kbd:`Caps Lock`',
'<p><kbd class="kbd docutils literal notranslate">Caps Lock</kbd></p>',
'\\sphinxAtStartPar\n\\sphinxkeyboard{\\sphinxupquote{Caps Lock}}',
),
(
# kbd role
':kbd:`sys rq`',
'<p><kbd class="kbd docutils literal notranslate">sys rq</kbd></p>',
'\\sphinxAtStartPar\n\\sphinxkeyboard{\\sphinxupquote{sys rq}}',
),
(
# kbd role
':kbd:`⌘+⇧+M`',
(
'<p>'
'<kbd class="kbd docutils literal notranslate">⌘</kbd>'
'+'
'<kbd class="kbd docutils literal notranslate">⇧</kbd>'
'+'
'<kbd class="kbd docutils literal notranslate">M</kbd>'
'</p>'
),
(
'\\sphinxAtStartPar\n'
'\\sphinxkeyboard{\\sphinxupquote{⌘}}'
'+'
'\\sphinxkeyboard{\\sphinxupquote{⇧}}'
'+'
'\\sphinxkeyboard{\\sphinxupquote{M}}'
),
),
(
# verify smarty-pants quotes
'"John"',
'<p>“John”</p>',
'\\sphinxAtStartPar\n“John”',
),
(
# ... but not in literal text
'``"John"``',
(
'<p><code class="docutils literal notranslate"><span class="pre">'
'"John"</span></code></p>'
),
'\\sphinxAtStartPar\n\\sphinxcode{\\sphinxupquote{"John"}}',
),
(
# verify classes for inline roles
':manpage:`mp(1)`',
'<p><em class="manpage">mp(1)</em></p>',
'\\sphinxAtStartPar\n\\sphinxstyleliteralemphasis{\\sphinxupquote{mp(1)}}',
),
(
# correct escaping in normal mode
'Γ\\\\∞$',
None,
'\\sphinxAtStartPar\nΓ\\textbackslash{}\\(\\infty\\)\\$',
),
(
# in verbatim code fragments
'::\n\n @Γ\\∞${}',
None,
(
'\\begin{sphinxVerbatim}[commandchars=\\\\\\{\\}]\n'
'@Γ\\PYGZbs{}\\(\\infty\\)\\PYGZdl{}\\PYGZob{}\\PYGZcb{}\n'
'\\end{sphinxVerbatim}'
),
),
(
# description list: simple
'term\n description',
'<dl class="simple">\n<dt>term</dt><dd><p>description</p>\n</dd>\n</dl>',
None,
),
(
# description list: with classifiers
'term : class1 : class2\n description',
(
'<dl class="simple">\n<dt>term<span class="classifier">class1</span>'
'<span class="classifier">class2</span></dt><dd><p>description</p>\n</dd>\n</dl>'
),
None,
),
(
# glossary (description list): multiple terms
'.. glossary::\n\n term1\n term2\n description',
(
'<dl class="simple glossary">\n'
'<dt id="term-term1">term1<a class="headerlink" href="#term-term1"'
' title="Link to this term">ΒΆ</a></dt>'
'<dt id="term-term2">term2<a class="headerlink" href="#term-term2"'
' title="Link to this term">ΒΆ</a></dt>'
'<dd><p>description</p>\n</dd>\n</dl>'
),
None,
),
(
# backslash escaping (docutils 0.16)
r'4 backslashes \\\\',
r'<p>4 backslashes \\</p>',
None,
),
],
)
@pytest.mark.sphinx('html', testroot='_blank')
def test_inline(
app: SphinxTestApp, rst: str, html_expected: str, latex_expected: str
) -> None:
if html_expected:
html_translated = rst_to_html(rst, app=app)
assert html_expected == html_translated, f'from {rst!r}'
if latex_expected:
latex_translated = rst_to_latex(rst, app=app)
assert latex_expected == latex_translated, f'from {rst!r}'
@pytest.mark.parametrize(
('rst', 'html_expected', 'latex_expected'),
[
(
# correct interpretation of code with whitespace
'``code sample``',
(
'<p><code class="(samp )?docutils literal notranslate"><span class="pre">'
'code</span>   <span class="pre">sample</span></code></p>'
),
r'\\sphinxAtStartPar\n\\sphinxcode{\\sphinxupquote{code sample}}',
),
(
# non-interpolation of dashes in option role
':option:`--with-option`',
(
'<p><code( class="xref std std-option docutils literal notranslate")?>'
'<span class="pre">--with-option</span></code></p>$'
),
(
r'\\sphinxAtStartPar\n'
r'\\sphinxcode{\\sphinxupquote{\\sphinxhyphen{}\\sphinxhyphen{}with\\sphinxhyphen{}option}}$'
),
),
(
# in URIs
'`test <https://www.google.com/~me/>`_',
None,
r'\\sphinxAtStartPar\n\\sphinxhref{https://www.google.com/~me/}{test}.*',
),
],
)
@pytest.mark.sphinx('html', testroot='_blank')
def test_inline_regex(
app: SphinxTestApp, rst: str, html_expected: str, latex_expected: str
) -> None:
if html_expected:
html_translated = rst_to_html(rst, app=app)
assert re.match(html_expected, html_translated), f'from {rst!r}'
if latex_expected:
latex_translated = rst_to_latex(rst, app=app)
assert re.match(latex_expected, latex_translated), f'from {rst!r}'
@pytest.mark.sphinx(
'dummy',
testroot='_blank',
confoverrides={'latex_engine': 'xelatex'},
)
@pytest.mark.parametrize(
('rst', 'latex_expected'),
[
(
# in verbatim code fragments
'::\n\n @Γ\\∞${}',
(
'\\begin{sphinxVerbatim}[commandchars=\\\\\\{\\}]\n'
'@Γ\\PYGZbs{}∞\\PYGZdl{}\\PYGZob{}\\PYGZcb{}\n'
'\\end{sphinxVerbatim}'
),
),
],
)
def test_inline_for_unicode_latex_engine(
app: SphinxTestApp, rst: str, latex_expected: str
) -> None:
latex_translated = rst_to_latex(rst, app=app)
assert latex_expected == latex_translated, f'from {rst!r}'
@pytest.mark.sphinx('dummy', testroot='_blank')
def test_samp_role(app: SphinxTestApp) -> None:
# no braces
text = ':samp:`a{b}c`'
doctree = parse_rst(text, env=app.env)
doctree_0 = doctree[0]
assert isinstance(doctree_0, nodes.Element)
assert_node(
doctree_0, [nodes.paragraph, nodes.literal, ('a', [nodes.emphasis, 'b'], 'c')]
)
# nested braces
text = ':samp:`a{{b}}c`'
doctree = parse_rst(text, env=app.env)
doctree_0 = doctree[0]
assert isinstance(doctree_0, nodes.Element)
assert_node(
doctree_0,
[nodes.paragraph, nodes.literal, ('a', [nodes.emphasis, '{b'], '}c')],
)
# half-opened braces
text = ':samp:`a{bc`'
doctree = parse_rst(text, env=app.env)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, 'a{bc'])
# escaped braces
text = ':samp:`a\\\\{b}c`'
doctree = parse_rst(text, env=app.env)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, 'a{b}c'])
# no braces (whitespace is kept as is)
text = ':samp:`code sample`'
doctree = parse_rst(text, env=app.env)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, 'code sample'])
@pytest.mark.sphinx('dummy', testroot='_blank')
def test_download_role(app: SphinxTestApp) -> None:
# implicit
text = ':download:`sphinx.rst`'
doctree = parse_rst(text, env=app.env)
doctree_0 = doctree[0]
assert isinstance(doctree_0, nodes.Element)
doctree_0_0 = doctree_0[0]
assert isinstance(doctree_0_0, nodes.Element)
assert_node(
doctree_0,
[nodes.paragraph, addnodes.download_reference, nodes.literal, 'sphinx.rst'],
)
assert_node(
doctree_0_0,
refdoc='dummy',
refdomain='',
reftype='download',
refexplicit=False,
reftarget='sphinx.rst',
refwarn=False,
)
assert_node(doctree_0_0[0], classes=['xref', 'download'])
# explicit
text = ':download:`reftitle <sphinx.rst>`'
doctree = parse_rst(text, env=app.env)
doctree_0 = doctree[0]
assert isinstance(doctree_0, nodes.Element)
doctree_0_0 = doctree_0[0]
assert isinstance(doctree_0_0, nodes.Element)
assert_node(
doctree_0,
[nodes.paragraph, addnodes.download_reference, nodes.literal, 'reftitle'],
)
assert_node(
doctree_0[0],
refdoc='dummy',
refdomain='',
reftype='download',
refexplicit=True,
reftarget='sphinx.rst',
refwarn=False,
)
assert_node(doctree_0_0[0], classes=['xref', 'download'])
@pytest.mark.sphinx('dummy', testroot='_blank')
def test_XRefRole(app: SphinxTestApp) -> None:
inliner = new_inliner(app.env)
role = XRefRole()
# implicit
doctrees, errors = role('ref', 'rawtext', 'text', 5, inliner, {}, []) # type: ignore[arg-type]
assert len(doctrees) == 1
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text'])
assert_node(
doctrees[0],
refdoc='dummy',
refdomain='',
reftype='ref',
reftarget='text',
refexplicit=False,
refwarn=False,
)
assert errors == []
# explicit
doctrees, errors = role('ref', 'rawtext', 'title <target>', 5, inliner, {}, []) # type: ignore[arg-type]
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'title'])
assert_node(
doctrees[0],
refdoc='dummy',
refdomain='',
reftype='ref',
reftarget='target',
refexplicit=True,
refwarn=False,
)
# bang
doctrees, errors = role('ref', 'rawtext', '!title <target>', 5, inliner, {}, []) # type: ignore[arg-type]
assert_node(doctrees[0], [nodes.literal, 'title <target>'])
# refdomain
doctrees, errors = role('test:doc', 'rawtext', 'text', 5, inliner, {}, []) # type: ignore[arg-type]
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text'])
assert_node(
doctrees[0],
refdoc='dummy',
refdomain='test',
reftype='doc',
reftarget='text',
refexplicit=False,
refwarn=False,
)
# fix_parens
role = XRefRole(fix_parens=True)
doctrees, errors = role('ref', 'rawtext', 'text()', 5, inliner, {}, []) # type: ignore[arg-type]
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text()'])
assert_node(
doctrees[0],
refdoc='dummy',
refdomain='',
reftype='ref',
reftarget='text',
refexplicit=False,
refwarn=False,
)
# lowercase
role = XRefRole(lowercase=True)
doctrees, errors = role('ref', 'rawtext', 'TEXT', 5, inliner, {}, []) # type: ignore[arg-type]
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'TEXT'])
assert_node(
doctrees[0],
refdoc='dummy',
refdomain='',
reftype='ref',
reftarget='text',
refexplicit=False,
refwarn=False,
)
@pytest.mark.sphinx('dummy', testroot='prolog')
def test_rst_prolog(app: SphinxTestApp) -> None:
app.build(force_all=True)
rst = app.env.get_doctree('restructuredtext')
md = app.env.get_doctree('markdown')
# rst_prolog
rst_0 = rst[0]
assert isinstance(rst_0, nodes.Element)
rst_0_0 = rst_0[0]
assert isinstance(rst_0_0, nodes.Element)
assert_node(rst_0, nodes.paragraph)
assert_node(rst_0_0, nodes.emphasis)
assert_node(rst_0_0[0], nodes.Text)
assert rst_0_0[0] == 'Hello world'
# rst_epilog
rst_1 = rst[-1]
assert isinstance(rst_1, nodes.Element)
rst_1_1 = rst_1[-1]
assert isinstance(rst_1_1, nodes.Element)
rst_1_1_0 = rst_1_1[0]
assert isinstance(rst_1_1_0, nodes.Element)
assert_node(rst_1, nodes.section)
assert_node(rst_1_1, nodes.paragraph)
assert_node(rst_1_1_0, nodes.emphasis)
assert_node(rst_1_1_0[0], nodes.Text)
assert rst_1_1_0[0] == 'Good-bye world'
# rst_prolog & rst_epilog are not applied when the reST parser is excluded
assert not md.rawsource.startswith('*Hello world*.')
assert not md.rawsource.endswith('*Good-bye world*.\n')
@pytest.mark.sphinx('dummy', testroot='keep_warnings')
def test_keep_warnings_is_True(app: SphinxTestApp) -> None:
app.build(force_all=True)
doctree = app.env.get_doctree('index')
doctree_0 = doctree[0]
assert isinstance(doctree_0, nodes.Element)
assert_node(doctree_0, nodes.section)
assert len(doctree_0) == 2
assert_node(doctree_0[1], nodes.system_message)
@pytest.mark.sphinx(
'dummy',
testroot='keep_warnings',
confoverrides={'keep_warnings': False},
)
def test_keep_warnings_is_False(app: SphinxTestApp) -> None:
app.build(force_all=True)
doctree = app.env.get_doctree('index')
doctree_0 = doctree[0]
assert isinstance(doctree_0, nodes.Element)
assert_node(doctree_0, nodes.section)
assert len(doctree_0) == 1
@pytest.mark.sphinx('dummy', testroot='refonly_bullet_list')
def test_compact_refonly_bullet_list(app: SphinxTestApp) -> None:
app.build(force_all=True)
doctree = app.env.get_doctree('index')
doctree_0 = doctree[0]
assert isinstance(doctree_0, nodes.Element)
doctree_0_2 = doctree_0[2]
assert isinstance(doctree_0_2, nodes.Element)
doctree_0_2_0 = doctree_0_2[0]
assert isinstance(doctree_0_2_0, nodes.Element)
doctree_0_4 = doctree_0[4]
assert isinstance(doctree_0_4, nodes.Element)
doctree_0_4_0 = doctree_0_4[0]
assert isinstance(doctree_0_4_0, nodes.Element)
assert_node(doctree_0, nodes.section)
assert len(doctree_0) == 5
assert doctree_0[1].astext() == 'List A:'
assert_node(doctree_0_2, nodes.bullet_list)
assert_node(doctree_0_2_0[0], addnodes.compact_paragraph)
assert doctree_0_2_0[0].astext() == 'genindex'
assert doctree_0[3].astext() == 'List B:'
assert_node(doctree_0_4, nodes.bullet_list)
assert_node(doctree_0_4_0[0], nodes.paragraph)
assert doctree_0_4_0[0].astext() == 'Hello'
@pytest.mark.sphinx('dummy', testroot='default_role')
def test_default_role1(app: SphinxTestApp) -> None:
app.build(force_all=True)
# default-role: pep
doctree = app.env.get_doctree('index')
doctree_0 = doctree[0]
assert isinstance(doctree_0, nodes.Element)
doctree_0_1 = doctree_0[1]
assert isinstance(doctree_0_1, nodes.Element)
assert_node(doctree_0, nodes.section)
assert_node(doctree_0_1, nodes.paragraph)
assert_node(doctree_0_1[0], addnodes.index)
assert_node(doctree_0_1[1], nodes.target)
assert_node(doctree_0_1[2], nodes.reference, classes=['pep'])
# no default-role
doctree = app.env.get_doctree('foo')
doctree_0 = doctree[0]
assert isinstance(doctree_0, nodes.Element)
doctree_0_1 = doctree_0[1]
assert isinstance(doctree_0_1, nodes.Element)
assert_node(doctree_0, nodes.section)
assert_node(doctree_0_1, nodes.paragraph)
assert_node(doctree_0_1[0], nodes.title_reference)
assert_node(doctree_0_1[1], nodes.Text)
@pytest.mark.sphinx(
'dummy',
testroot='default_role',
confoverrides={'default_role': 'guilabel'},
)
def test_default_role2(app: SphinxTestApp) -> None:
app.build(force_all=True)
# default-role directive is stronger than configuration
doctree = app.env.get_doctree('index')
doctree_0 = doctree[0]
assert isinstance(doctree_0, nodes.Element)
doctree_0_1 = doctree_0[1]
assert isinstance(doctree_0_1, nodes.Element)
assert_node(doctree_0, nodes.section)
assert_node(doctree_0_1, nodes.paragraph)
assert_node(doctree_0_1[0], addnodes.index)
assert_node(doctree_0_1[1], nodes.target)
assert_node(doctree_0_1[2], nodes.reference, classes=['pep'])
# default_role changes the default behavior
doctree = app.env.get_doctree('foo')
doctree_0 = doctree[0]
assert isinstance(doctree_0, nodes.Element)
doctree_0_1 = doctree_0[1]
assert isinstance(doctree_0_1, nodes.Element)
assert_node(doctree_0, nodes.section)
assert_node(doctree_0_1, nodes.paragraph)
assert_node(doctree_0_1[0], nodes.inline, classes=['guilabel'])
assert_node(doctree_0_1[1], nodes.Text)
|
ForgivingLaTeXTranslator
|
python
|
huggingface__transformers
|
src/transformers/models/zoedepth/modeling_zoedepth.py
|
{
"start": 6452,
"end": 7460
}
|
class ____(nn.Module):
def __init__(self, config: ZoeDepthConfig):
super().__init__()
self.layers = nn.ModuleList()
for _ in range(len(config.neck_hidden_sizes)):
self.layers.append(ZoeDepthFeatureFusionLayer(config))
def forward(self, hidden_states):
# reversing the hidden_states, we start from the last
hidden_states = hidden_states[::-1]
fused_hidden_states = []
fused_hidden_state = None
for hidden_state, layer in zip(hidden_states, self.layers):
if fused_hidden_state is None:
# first layer only uses the last hidden_state
fused_hidden_state = layer(hidden_state)
else:
fused_hidden_state = layer(fused_hidden_state, hidden_state)
fused_hidden_states.append(fused_hidden_state)
return fused_hidden_states
# Copied from transformers.models.dpt.modeling_dpt.DPTPreActResidualLayer with DPT->ZoeDepth
|
ZoeDepthFeatureFusionStage
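The fusion stage above walks the backbone features from deepest to shallowest and threads a running fused state through one layer per level. The following is a minimal, hypothetical sketch of that reversed-fusion loop in plain PyTorch; ToyFusionStage and its 1x1 convolutions are illustrative stand-ins for ZoeDepthFeatureFusionLayer, not part of the transformers API.
import torch
from torch import nn
class ToyFusionStage(nn.Module):
    """Fuse equally sized feature maps from deepest to shallowest (illustrative only)."""
    def __init__(self, num_levels: int, channels: int):
        super().__init__()
        # one projection per level stands in for the real fusion layer (assumption)
        self.layers = nn.ModuleList(nn.Conv2d(channels, channels, kernel_size=1) for _ in range(num_levels))
    def forward(self, hidden_states):
        hidden_states = hidden_states[::-1]  # start from the last (deepest) feature map
        fused = None
        outputs = []
        for hidden_state, layer in zip(hidden_states, self.layers):
            # the first level has nothing to fuse with; later levels add the running result
            fused = layer(hidden_state) if fused is None else layer(fused + hidden_state)
            outputs.append(fused)
        return outputs
features = [torch.randn(1, 8, 16, 16) for _ in range(3)]
print([f.shape for f in ToyFusionStage(3, 8)(features)])  # three tensors of shape (1, 8, 16, 16)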
|
python
|
huggingface__transformers
|
src/transformers/models/flaubert/modeling_flaubert.py
|
{
"start": 12711,
"end": 15651
}
|
class ____(nn.Module):
"""
Compute SQuAD end logits from sequence hidden states.
Args:
config ([`FlaubertConfig`]):
The config used by the model, will be used to grab the `hidden_size` of the model and the `layer_norm_eps`
to use.
"""
def __init__(self, config: FlaubertConfig):
super().__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dense_1 = nn.Linear(config.hidden_size, 1)
def forward(
self,
hidden_states: torch.FloatTensor,
start_states: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
p_mask: Optional[torch.FloatTensor] = None,
) -> torch.FloatTensor:
"""
Args:
hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`):
The final hidden states of the model.
start_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`, *optional*):
The hidden states of the first tokens for the labeled span.
start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
The position of the first token for the labeled span.
p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*):
Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token
should be masked.
<Tip>
One of `start_states` or `start_positions` should be not `None`. If both are set, `start_positions` overrides
`start_states`.
</Tip>
Returns:
`torch.FloatTensor`: The end logits for SQuAD.
"""
assert start_states is not None or start_positions is not None, (
"One of start_states, start_positions should be not None"
)
if start_positions is not None:
slen, hsz = hidden_states.shape[-2:]
start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
start_states = hidden_states.gather(-2, start_positions) # shape (bsz, 1, hsz)
start_states = start_states.expand(-1, slen, -1) # shape (bsz, slen, hsz)
x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))
x = self.activation(x)
x = self.LayerNorm(x)
x = self.dense_1(x).squeeze(-1)
if p_mask is not None:
if p_mask.dtype == torch.float16:
x = x * (1 - p_mask) - 65500 * p_mask
else:
x = x * (1 - p_mask) - 1e30 * p_mask
return x
# Copied from transformers.models.xlm.modeling_xlm.XLMPoolerAnswerClass with XLM->Flaubert
|
FlaubertPoolerEndLogits
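The forward() above selects each example's start-token hidden state with gather and broadcasts it across the sequence before concatenation. Below is a small self-contained sketch of just that indexing step, using made-up shapes; the model itself is not needed.
import torch
bsz, slen, hsz = 2, 5, 4
hidden_states = torch.randn(bsz, slen, hsz)
start_positions = torch.tensor([1, 3])
index = start_positions[:, None, None].expand(-1, -1, hsz)   # shape (bsz, 1, hsz)
start_states = hidden_states.gather(-2, index)                # shape (bsz, 1, hsz)
start_states = start_states.expand(-1, slen, -1)              # shape (bsz, slen, hsz)
assert torch.equal(start_states[0, 0], hidden_states[0, 1])   # row 0 picked position 1
assert torch.equal(start_states[1, 2], hidden_states[1, 3])   # row 1 picked position 3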
|
python
|
facebook__pyre-check
|
tools/upgrade/commands/fixme_all.py
|
{
"start": 760,
"end": 3032
}
|
class ____(ErrorSuppressingCommand):
def __init__(
self,
command_arguments: CommandArguments,
*,
repository: Repository,
upgrade_version: bool,
error_source: ErrorSource,
) -> None:
super().__init__(command_arguments, repository=repository)
self._upgrade_version: bool = upgrade_version
self._error_source: ErrorSource = error_source
@staticmethod
def from_arguments(
arguments: argparse.Namespace, repository: Repository
) -> "FixmeAll":
command_arguments = CommandArguments.from_arguments(arguments)
return FixmeAll(
command_arguments,
repository=repository,
upgrade_version=arguments.upgrade_version,
error_source=arguments.error_source,
)
@classmethod
def add_arguments(cls, parser: argparse.ArgumentParser) -> None:
super(FixmeAll, cls).add_arguments(parser)
parser.set_defaults(command=cls.from_arguments)
parser.add_argument(
"--upgrade-version",
action="store_true",
help="Upgrade and clean project if a version override set.",
)
parser.add_argument(
"--error-source",
choices=list(ErrorSource),
default=ErrorSource.GENERATE,
type=ErrorSource,
)
@override
def run(self) -> None:
project_configuration = Configuration.find_project_configuration()
configurations = Configuration.gather_local_configurations()
for configuration in configurations:
self._get_and_suppress_errors(
configuration=configuration,
error_source=self._error_source,
upgrade_version=self._upgrade_version,
)
local_root = configuration.get_directory().resolve()
title = "{} for {}".format(
(
"Update pyre version"
if self._upgrade_version
else "Suppress pyre errors"
),
str(local_root.relative_to(project_configuration.parent.resolve())),
)
self._repository.commit_changes(commit=(not self._no_commit), title=title)
|
FixmeAll
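FixmeAll registers itself on an argparse subparser through set_defaults(command=cls.from_arguments) and is later rebuilt from the parsed namespace. Here is a stripped-down sketch of that registration/dispatch pattern with toy names standing in for the pyre-check Repository and Configuration machinery; it is not the project's actual CLI.
import argparse
class ToyCommand:
    def __init__(self, upgrade_version: bool) -> None:
        self.upgrade_version = upgrade_version
    @staticmethod
    def from_arguments(arguments: argparse.Namespace) -> "ToyCommand":
        return ToyCommand(upgrade_version=arguments.upgrade_version)
    @classmethod
    def add_arguments(cls, parser: argparse.ArgumentParser) -> None:
        # the parsed namespace remembers which command class should handle it
        parser.set_defaults(command=cls.from_arguments)
        parser.add_argument("--upgrade-version", action="store_true")
    def run(self) -> None:
        print(f"upgrade_version={self.upgrade_version}")
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
ToyCommand.add_arguments(subparsers.add_parser("fixme-all"))
args = parser.parse_args(["fixme-all", "--upgrade-version"])
args.command(args).run()  # prints: upgrade_version=True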
|
python
|
sphinx-doc__sphinx
|
sphinx/addnodes.py
|
{
"start": 13299,
"end": 13446
}
|
class ____(nodes.Element):
"""Node for "horizontal lists", i.e. lists that should be compressed to
take up less vertical space.
"""
|
hlist
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/inheritance/test_poly_loading.py
|
{
"start": 5367,
"end": 5915
}
|
class ____(
BaseAndSubFixture,
fixtures.DeclarativeMappedTest,
testing.AssertsExecutionResults,
):
use_options = True
def test_load(self):
A, B, ASub, C = self.classes("A", "B", "ASub", "C")
s = fixture_session()
q = (
s.query(A)
.order_by(A.id)
.options(
selectin_polymorphic(A, [ASub]),
selectinload(ASub.cs),
selectinload(A.bs),
)
)
self._assert_all_selectin(q)
|
LoadBaseAndSubWEagerRelOpt
|
python
|
walkccc__LeetCode
|
solutions/425. Word Squares/425.py
|
{
"start": 120,
"end": 615
}
|
class ____:
def __init__(self, words: list[str]):
self.root = TrieNode()
for word in words:
self._insert(word)
def findBy(self, prefix: str) -> list[str]:
node = self.root
for c in prefix:
if c not in node.children:
return []
node = node.children[c]
return node.startsWith
def _insert(self, word: str) -> None:
node = self.root
for c in word:
node = node.children.setdefault(c, TrieNode())
node.startsWith.append(word)
|
Trie
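The trie above indexes every word under each of its prefixes, so findBy returns all candidates sharing a prefix after walking only len(prefix) nodes. A self-contained usage sketch follows, with TrieNode spelled out since the record only implies its shape (a children dict plus a startsWith list); PrefixIndex is an illustrative name, not the solution's class.
class TrieNode:
    def __init__(self):
        self.children: dict[str, "TrieNode"] = {}
        self.startsWith: list[str] = []
class PrefixIndex:
    def __init__(self, words: list[str]):
        self.root = TrieNode()
        for word in words:
            node = self.root
            for c in word:
                node = node.children.setdefault(c, TrieNode())
                node.startsWith.append(word)  # every prefix node remembers the word
    def find_by(self, prefix: str) -> list[str]:
        node = self.root
        for c in prefix:
            if c not in node.children:
                return []
            node = node.children[c]
        return node.startsWith
index = PrefixIndex(["ball", "able", "area", "lead", "lady"])
print(index.find_by("le"))  # ['lead']
print(index.find_by("a"))   # ['able', 'area']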
|
python
|
euske__pdfminer
|
pdfminer/pdfinterp.py
|
{
"start": 1047,
"end": 1273
}
|
class ____(PDFException):
pass
## Constants
##
LITERAL_PDF = LIT('PDF')
LITERAL_TEXT = LIT('Text')
LITERAL_FONT = LIT('Font')
LITERAL_FORM = LIT('Form')
LITERAL_IMAGE = LIT('Image')
## PDFTextState
##
|
PDFInterpreterError
|
python
|
huggingface__transformers
|
src/transformers/models/informer/modular_informer.py
|
{
"start": 14458,
"end": 15385
}
|
class ____(TimeSeriesTransformerDecoderLayer):
def __init__(self, config: InformerConfig, layer_idx: Optional[int] = None):
super().__init__(config)
del self.self_attn
if config.attention_type == "prob":
self.self_attn = InformerProbSparseAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
sampling_factor=config.sampling_factor,
is_decoder=True,
layer_idx=layer_idx,
)
else:
self.self_attn = InformerAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
config=config,
layer_idx=layer_idx,
)
|
InformerDecoderLayer
|
python
|
keon__algorithms
|
tests/test_maths.py
|
{
"start": 9826,
"end": 10717
}
|
class ____(unittest.TestCase):
"""[summary]
Test for the file factorial.py
Arguments:
unittest {[type]} -- [description]
"""
def test_factorial(self):
self.assertEqual(1, factorial(0))
self.assertEqual(120, factorial(5))
self.assertEqual(3628800, factorial(10))
self.assertEqual(637816310, factorial(34521, 10 ** 9 + 7))
self.assertRaises(ValueError, factorial, -42)
self.assertRaises(ValueError, factorial, 42, -1)
def test_factorial_recur(self):
self.assertEqual(1, factorial_recur(0))
self.assertEqual(120, factorial_recur(5))
self.assertEqual(3628800, factorial_recur(10))
self.assertEqual(637816310, factorial_recur(34521, 10 ** 9 + 7))
self.assertRaises(ValueError, factorial_recur, -42)
self.assertRaises(ValueError, factorial_recur, 42, -1)
|
TestFactorial
|
python
|
readthedocs__readthedocs.org
|
readthedocs/core/unresolver.py
|
{
"start": 2962,
"end": 3179
}
|
class ____(Enum):
"""Where the custom domain was resolved from."""
custom_domain = auto()
public_domain = auto()
external_domain = auto()
http_header = auto()
@dataclass(slots=True)
|
DomainSourceType
|
python
|
PyCQA__pylint
|
doc/data/messages/t/too-many-positional-sub-patterns/bad.py
|
{
"start": 0,
"end": 316
}
|
class ____:
__match_args__ = ("title", "year")
def __init__(self, title, year, author):
self.title = title
self.year = year
self.author = author
def func(item: Book):
match item:
case Book("title", 2000, "author"): # [too-many-positional-sub-patterns]
...
|
Book
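The warning in the record fires because Book.__match_args__ names only two attributes, so the third positional sub-pattern has nowhere to bind. Below is a hedged sketch of one way to satisfy the checker: keep two positional patterns and match the remaining attribute by keyword (extending __match_args__ would work as well).
class Book:
    __match_args__ = ("title", "year")
    def __init__(self, title, year, author):
        self.title = title
        self.year = year
        self.author = author
def func(item: Book) -> str:
    match item:
        # two positional sub-patterns (title, year) plus a keyword pattern for author
        case Book("title", 2000, author="author"):
            return "matched"
        case _:
            return "no match"
print(func(Book("title", 2000, "author")))  # matched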
|