| language | repo | path | class_span | source | target |
|---|---|---|---|---|---|
python
|
openai__gym
|
gym/error.py
|
{
"start": 125,
"end": 250
}
|
class ____(Error):
"""Raised when the user requests an item from the registry that does not actually exist."""
|
Unregistered
|
python
|
google__pytype
|
pytype/constant_folding.py
|
{
"start": 3020,
"end": 3187
}
|
class ____:
"""A linear collection (e.g. list, tuple, set)."""
types: frozenset[Any]
values: tuple[Any, ...]
elements: tuple[Any, ...]
@attrs.define
|
_Collection
|
python
|
huggingface__transformers
|
src/transformers/integrations/integration_utils.py
|
{
"start": 89003,
"end": 91487
}
|
class ____(TrainerCallback):
"""A [`TrainerCallback`] that sends the logs to [Flyte](https://flyte.org/).
NOTE: This callback only works within a Flyte task.
Args:
save_log_history (`bool`, *optional*, defaults to `True`):
When set to True, the training logs are saved as a Flyte Deck.
sync_checkpoints (`bool`, *optional*, defaults to `True`):
When set to True, checkpoints are synced with Flyte and can be used to resume training in the case of an
interruption.
Example:
```python
# Note: This example skips over some setup steps for brevity.
from flytekit import current_context, task
@task
def train_hf_transformer():
cp = current_context().checkpoint
trainer = Trainer(..., callbacks=[FlyteCallback()])
output = trainer.train(resume_from_checkpoint=cp.restore())
```
"""
def __init__(self, save_log_history: bool = True, sync_checkpoints: bool = True):
super().__init__()
if not is_flytekit_available():
raise ImportError("FlyteCallback requires flytekit to be installed. Run `pip install flytekit`.")
if not is_flyte_deck_standard_available() or not is_pandas_available():
logger.warning(
"Syncing log history requires both flytekitplugins-deck-standard and pandas to be installed. "
"Run `pip install flytekitplugins-deck-standard pandas` to enable this feature."
)
save_log_history = False
from flytekit import current_context
self.cp = current_context().checkpoint
self.save_log_history = save_log_history
self.sync_checkpoints = sync_checkpoints
def on_save(self, args, state, control, **kwargs):
if self.sync_checkpoints and state.is_world_process_zero:
ckpt_dir = f"checkpoint-{state.global_step}"
artifact_path = os.path.join(args.output_dir, ckpt_dir)
logger.info(f"Syncing checkpoint in {ckpt_dir} to Flyte. This may take time.")
self.cp.save(artifact_path)
def on_train_end(self, args, state, control, **kwargs):
if self.save_log_history:
import pandas as pd
from flytekit import Deck
from flytekitplugins.deck.renderer import TableRenderer
log_history_df = pd.DataFrame(state.log_history)
Deck("Log History", TableRenderer().to_html(log_history_df))
|
FlyteCallback
|
python
|
mkdocs__mkdocs
|
mkdocs/tests/plugin_tests.py
|
{
"start": 600,
"end": 761
}
|
class ____(base.Config):
foo = c.Type(str, default='default foo')
bar = c.Type(int, default=0)
dir = c.Optional(c.Dir(exists=False))
|
_DummyPluginConfig
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_config/pythonic_config/io_manager.py
|
{
"start": 1178,
"end": 3336
}
|
class ____( # pyright: ignore[reportIncompatibleMethodOverride]
NestedResourcesResourceDefinition,
IOManagerDefinition,
):
def __init__(
self,
configurable_resource_cls: type,
resource_fn: ResourceFunction,
config_schema: Any,
description: Optional[str],
nested_resources: Mapping[str, Any],
nested_partial_resources: Mapping[str, Any],
input_config_schema: Optional[Union[CoercableToConfigSchema, type[Config]]] = None,
output_config_schema: Optional[Union[CoercableToConfigSchema, type[Config]]] = None,
dagster_maintained: bool = False,
):
input_config_schema_resolved: CoercableToConfigSchema = (
cast("type[Config]", input_config_schema).to_config_schema()
if safe_is_subclass(input_config_schema, Config)
else cast("CoercableToConfigSchema", input_config_schema)
)
output_config_schema_resolved: CoercableToConfigSchema = (
cast("type[Config]", output_config_schema).to_config_schema()
if safe_is_subclass(output_config_schema, Config)
else cast("CoercableToConfigSchema", output_config_schema)
)
super().__init__(
resource_fn=resource_fn,
config_schema=config_schema,
description=description,
input_config_schema=input_config_schema_resolved,
output_config_schema=output_config_schema_resolved,
)
self._nested_partial_resources = nested_partial_resources
self._nested_resources = nested_resources
self._configurable_resource_cls = configurable_resource_cls
self._dagster_maintained = dagster_maintained
@property
def configurable_resource_cls(self) -> type:
return self._configurable_resource_cls
@property
def nested_resources(
self,
) -> Mapping[str, Any]:
return self._nested_resources
@property
def nested_partial_resources(
self,
) -> Mapping[str, "CoercibleToResource"]:
return self._nested_partial_resources
@public
|
ConfigurableIOManagerFactoryResourceDefinition
|
python
|
pytorch__pytorch
|
torchgen/_autoheuristic/train_decision.py
|
{
"start": 25383,
"end": 25754
}
|
class ____:
accuracy: AccuracyMetrics
speedup: WrongSpeedupMetrics
ranking: RankingMetrics
default_comparison: DefaultComparisonMetrics
def to_map(self):
return {
**self.accuracy.to_map(),
**self.speedup.to_map(),
**self.ranking.to_map(),
**self.default_comparison.to_map(),
}
|
EvalResults
|
python
|
django-import-export__django-import-export
|
tests/core/migrations/0005_addparentchild.py
|
{
"start": 76,
"end": 1357
}
|
class ____(migrations.Migration):
dependencies = [
("core", "0004_bookwithchapters"),
]
operations = [
migrations.CreateModel(
name="Child",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name="Parent",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=100)),
],
),
migrations.AddField(
model_name="child",
name="parent",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="core.Parent"
),
),
]
|
Migration
|
python
|
PyCQA__pylint
|
tests/functional/a/arguments_differ.py
|
{
"start": 1666,
"end": 1966
}
|
class ____(Super):
# pylint: disable=unused-argument
def __init__(self, arg):
super().__init__()
def __private(self, arg):
pass
def __private2_(self, arg):
pass
def ___private3(self, arg):
pass
def method(self, param='abc'):
pass
|
Sub
|
python
|
django__django
|
tests/generic_relations/models.py
|
{
"start": 3105,
"end": 3254
}
|
class ____(models.Model):
id = models.IntegerField(primary_key=True)
tags = GenericRelation(TaggedItem, related_query_name="manualpk")
|
ManualPK
|
python
|
PyCQA__pylint
|
pylint/pyreverse/diagrams.py
|
{
"start": 1113,
"end": 1646
}
|
class ____(Figure):
"""A diagram object, i.e. a label associated to an astroid node."""
default_shape = ""
def __init__(
self, title: str = "No name", node: nodes.NodeNG | None = None
) -> None:
super().__init__()
self.title = title
self.node: nodes.NodeNG = node or nodes.NodeNG(
lineno=None,
col_offset=None,
end_lineno=None,
end_col_offset=None,
parent=None,
)
self.shape = self.default_shape
|
DiagramEntity
|
python
|
Unity-Technologies__ml-agents
|
ml-agents-envs/mlagents_envs/side_channel/environment_parameters_channel.py
|
{
"start": 219,
"end": 3874
}
|
class ____(SideChannel):
"""
This is the SideChannel for sending environment parameters to Unity.
You can send parameters to an environment with the command
set_float_parameter.
"""
class EnvironmentDataTypes(IntEnum):
FLOAT = 0
SAMPLER = 1
class SamplerTypes(IntEnum):
UNIFORM = 0
GAUSSIAN = 1
MULTIRANGEUNIFORM = 2
def __init__(self) -> None:
channel_id = uuid.UUID("534c891e-810f-11ea-a9d0-822485860400")
super().__init__(channel_id)
def on_message_received(self, msg: IncomingMessage) -> None:
raise UnityCommunicationException(
"The EnvironmentParametersChannel received a message from Unity, "
+ "this should not have happened."
)
def set_float_parameter(self, key: str, value: float) -> None:
"""
Sets a float environment parameter in the Unity Environment.
:param key: The string identifier of the parameter.
:param value: The float value of the parameter.
"""
msg = OutgoingMessage()
msg.write_string(key)
msg.write_int32(self.EnvironmentDataTypes.FLOAT)
msg.write_float32(value)
super().queue_message_to_send(msg)
def set_uniform_sampler_parameters(
self, key: str, min_value: float, max_value: float, seed: int
) -> None:
"""
Sets a uniform environment parameter sampler.
:param key: The string identifier of the parameter.
:param min_value: The minimum of the sampling distribution.
:param max_value: The maximum of the sampling distribution.
:param seed: The random seed to initialize the sampler.
"""
msg = OutgoingMessage()
msg.write_string(key)
msg.write_int32(self.EnvironmentDataTypes.SAMPLER)
msg.write_int32(seed)
msg.write_int32(self.SamplerTypes.UNIFORM)
msg.write_float32(min_value)
msg.write_float32(max_value)
super().queue_message_to_send(msg)
def set_gaussian_sampler_parameters(
self, key: str, mean: float, st_dev: float, seed: int
) -> None:
"""
Sets a gaussian environment parameter sampler.
:param key: The string identifier of the parameter.
:param mean: The mean of the sampling distribution.
:param st_dev: The standard deviation of the sampling distribution.
:param seed: The random seed to initialize the sampler.
"""
msg = OutgoingMessage()
msg.write_string(key)
msg.write_int32(self.EnvironmentDataTypes.SAMPLER)
msg.write_int32(seed)
msg.write_int32(self.SamplerTypes.GAUSSIAN)
msg.write_float32(mean)
msg.write_float32(st_dev)
super().queue_message_to_send(msg)
def set_multirangeuniform_sampler_parameters(
self, key: str, intervals: List[Tuple[float, float]], seed: int
) -> None:
"""
Sets a multirangeuniform environment parameter sampler.
:param key: The string identifier of the parameter.
:param intervals: The lists of min and max that define each uniform distribution.
:param seed: The random seed to initialize the sampler.
"""
msg = OutgoingMessage()
msg.write_string(key)
msg.write_int32(self.EnvironmentDataTypes.SAMPLER)
msg.write_int32(seed)
msg.write_int32(self.SamplerTypes.MULTIRANGEUNIFORM)
flattened_intervals = [value for interval in intervals for value in interval]
msg.write_float32_list(flattened_intervals)
super().queue_message_to_send(msg)
|
EnvironmentParametersChannel
|
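A minimal usage sketch for the row above, assuming a local `mlagents_envs` install; the build path is a hypothetical placeholder:

```python
from mlagents_envs.environment import UnityEnvironment
from mlagents_envs.side_channel.environment_parameters_channel import (
    EnvironmentParametersChannel,
)

channel = EnvironmentParametersChannel()
# "./build/MyEnv" stands in for a built Unity environment binary.
env = UnityEnvironment(file_name="./build/MyEnv", side_channels=[channel])
channel.set_float_parameter("gravity", -9.81)  # queued, sent with the next step
env.reset()
env.close()
```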
python
|
sympy__sympy
|
sympy/tensor/array/expressions/array_expressions.py
|
{
"start": 27862,
"end": 36629
}
|
class ____(_CodegenArrayAbstract):
r"""
Class to represent the diagonal operator.
Explanation
===========
In a 2-dimensional array it returns the diagonal; this looks like the
operation:
`A_{ij} \rightarrow A_{ii}`
The diagonal over axes 1 and 2 (the second and third) of the tensor product
of two 2-dimensional arrays `A \otimes B` is
`\Big[ A_{ab} B_{cd} \Big]_{abcd} \rightarrow \Big[ A_{ai} B_{id} \Big]_{adi}`
In this last example the array expression has been reduced from
4-dimensional to 3-dimensional. Notice that no contraction has occurred,
rather there is a new index `i` for the diagonal, contraction would have
reduced the array to 2 dimensions.
Notice that the diagonalized out dimensions are added as new dimensions at
the end of the indices.
"""
def __new__(cls, expr, *diagonal_indices, **kwargs):
expr = _sympify(expr)
diagonal_indices = [Tuple(*sorted(i)) for i in diagonal_indices]
canonicalize = kwargs.get("canonicalize", False)
shape = get_shape(expr)
if shape is not None:
cls._validate(expr, *diagonal_indices, **kwargs)
# Get new shape:
positions, shape = cls._get_positions_shape(shape, diagonal_indices)
else:
positions = None
if len(diagonal_indices) == 0:
return expr
obj = Basic.__new__(cls, expr, *diagonal_indices)
obj._positions = positions
obj._subranks = _get_subranks(expr)
obj._shape = shape
if canonicalize:
return obj._canonicalize()
return obj
def _canonicalize(self):
expr = self.expr
diagonal_indices = self.diagonal_indices
trivial_diags = [i for i in diagonal_indices if len(i) == 1]
if len(trivial_diags) > 0:
trivial_pos = {e[0]: i for i, e in enumerate(diagonal_indices) if len(e) == 1}
diag_pos = {e: i for i, e in enumerate(diagonal_indices) if len(e) > 1}
diagonal_indices_short = [i for i in diagonal_indices if len(i) > 1]
rank1 = get_rank(self)
rank2 = len(diagonal_indices)
rank3 = rank1 - rank2
inv_permutation = []
counter1 = 0
indices_down = ArrayDiagonal._push_indices_down(diagonal_indices_short, list(range(rank1)), get_rank(expr))
for i in indices_down:
if i in trivial_pos:
inv_permutation.append(rank3 + trivial_pos[i])
elif isinstance(i, (Integer, int)):
inv_permutation.append(counter1)
counter1 += 1
else:
inv_permutation.append(rank3 + diag_pos[i])
permutation = _af_invert(inv_permutation)
if len(diagonal_indices_short) > 0:
return _permute_dims(_array_diagonal(expr, *diagonal_indices_short), permutation)
else:
return _permute_dims(expr, permutation)
if isinstance(expr, ArrayAdd):
return self._ArrayDiagonal_denest_ArrayAdd(expr, *diagonal_indices)
if isinstance(expr, ArrayDiagonal):
return self._ArrayDiagonal_denest_ArrayDiagonal(expr, *diagonal_indices)
if isinstance(expr, PermuteDims):
return self._ArrayDiagonal_denest_PermuteDims(expr, *diagonal_indices)
if isinstance(expr, (ZeroArray, ZeroMatrix)):
positions, shape = self._get_positions_shape(expr.shape, diagonal_indices)
return ZeroArray(*shape)
return self.func(expr, *diagonal_indices, canonicalize=False)
@staticmethod
def _validate(expr, *diagonal_indices, **kwargs):
# Check that no diagonalization happens on indices with mismatched
# dimensions:
shape = get_shape(expr)
for i in diagonal_indices:
if any(j >= len(shape) for j in i):
raise ValueError("index is larger than expression shape")
if len({shape[j] for j in i}) != 1:
raise ValueError("diagonalizing indices of different dimensions")
if not kwargs.get("allow_trivial_diags", False) and len(i) <= 1:
raise ValueError("need at least two axes to diagonalize")
if len(set(i)) != len(i):
raise ValueError("axis index cannot be repeated")
@staticmethod
def _remove_trivial_dimensions(shape, *diagonal_indices):
return [tuple(j for j in i) for i in diagonal_indices if shape[i[0]] != 1]
@property
def expr(self):
return self.args[0]
@property
def diagonal_indices(self):
return self.args[1:]
@staticmethod
def _flatten(expr, *outer_diagonal_indices):
inner_diagonal_indices = expr.diagonal_indices
all_inner = [j for i in inner_diagonal_indices for j in i]
all_inner.sort()
# TODO: add API for total rank and cumulative rank:
total_rank = _get_subrank(expr)
inner_rank = len(all_inner)
outer_rank = total_rank - inner_rank
shifts = [0 for i in range(outer_rank)]
counter = 0
pointer = 0
for i in range(outer_rank):
while pointer < inner_rank and counter >= all_inner[pointer]:
counter += 1
pointer += 1
shifts[i] += pointer
counter += 1
outer_diagonal_indices = tuple(tuple(shifts[j] + j for j in i) for i in outer_diagonal_indices)
diagonal_indices = inner_diagonal_indices + outer_diagonal_indices
return _array_diagonal(expr.expr, *diagonal_indices)
@classmethod
def _ArrayDiagonal_denest_ArrayAdd(cls, expr, *diagonal_indices):
return _array_add(*[_array_diagonal(arg, *diagonal_indices) for arg in expr.args])
@classmethod
def _ArrayDiagonal_denest_ArrayDiagonal(cls, expr, *diagonal_indices):
return cls._flatten(expr, *diagonal_indices)
@classmethod
def _ArrayDiagonal_denest_PermuteDims(cls, expr: PermuteDims, *diagonal_indices):
back_diagonal_indices = [[expr.permutation(j) for j in i] for i in diagonal_indices]
nondiag = [i for i in range(get_rank(expr)) if not any(i in j for j in diagonal_indices)]
back_nondiag = [expr.permutation(i) for i in nondiag]
remap = {e: i for i, e in enumerate(sorted(back_nondiag))}
new_permutation1 = [remap[i] for i in back_nondiag]
shift = len(new_permutation1)
diag_block_perm = [i + shift for i in range(len(back_diagonal_indices))]
new_permutation = new_permutation1 + diag_block_perm
return _permute_dims(
_array_diagonal(
expr.expr,
*back_diagonal_indices
),
new_permutation
)
def _push_indices_down_nonstatic(self, indices):
transform = lambda x: self._positions[x] if x < len(self._positions) else None
return _apply_recursively_over_nested_lists(transform, indices)
def _push_indices_up_nonstatic(self, indices):
def transform(x):
for i, e in enumerate(self._positions):
if (isinstance(e, int) and x == e) or (isinstance(e, tuple) and x in e):
return i
return _apply_recursively_over_nested_lists(transform, indices)
@classmethod
def _push_indices_down(cls, diagonal_indices, indices, rank):
positions, shape = cls._get_positions_shape(range(rank), diagonal_indices)
transform = lambda x: positions[x] if x < len(positions) else None
return _apply_recursively_over_nested_lists(transform, indices)
@classmethod
def _push_indices_up(cls, diagonal_indices, indices, rank):
positions, shape = cls._get_positions_shape(range(rank), diagonal_indices)
def transform(x):
for i, e in enumerate(positions):
if (isinstance(e, int) and x == e) or (isinstance(e, (tuple, Tuple)) and (x in e)):
return i
return _apply_recursively_over_nested_lists(transform, indices)
@classmethod
def _get_positions_shape(cls, shape, diagonal_indices):
data1 = tuple((i, shp) for i, shp in enumerate(shape) if not any(i in j for j in diagonal_indices))
pos1, shp1 = zip(*data1) if data1 else ((), ())
data2 = tuple((i, shape[i[0]]) for i in diagonal_indices)
pos2, shp2 = zip(*data2) if data2 else ((), ())
positions = pos1 + pos2
shape = shp1 + shp2
return positions, shape
def as_explicit(self):
expr = self.expr
if hasattr(expr, "as_explicit"):
expr = expr.as_explicit()
return tensordiagonal(expr, *self.diagonal_indices)
|
ArrayDiagonal
|
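A quick sketch mirroring the docstring's `A ⊗ B` example; the import paths assume a recent SymPy, where `ArrayTensorProduct` builds the tensor product:

```python
from sympy import MatrixSymbol
from sympy.tensor.array.expressions import ArrayDiagonal, ArrayTensorProduct

A = MatrixSymbol("A", 3, 3)
B = MatrixSymbol("B", 3, 3)
# Diagonal over axes 1 and 2 of A ⊗ B: [A_ab B_cd]_abcd -> [A_ai B_id]_adi
expr = ArrayDiagonal(ArrayTensorProduct(A, B), (1, 2))
print(expr.shape)  # (3, 3, 3): the diagonalized axis is appended last
```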
python
|
run-llama__llama_index
|
llama-index-packs/llama-index-packs-self-rag/llama_index/packs/self_rag/base.py
|
{
"start": 4784,
"end": 9493
}
|
class ____(CustomQueryEngine):
"""Simple short form self RAG query engine."""
llm: Any = Field(default=None, description="llm")
retriever: BaseRetriever = Field(default=None, description="retriever")
generate_kwargs: Dict = Field(default=None, description="llm generation arguments")
verbose: bool = Field(default=True, description="Verbose.")
def __init__(
self,
model_path: str,
retriever: BaseRetriever,
verbose: bool = False,
model_kwargs: Dict = None,
generate_kwargs: Dict = None,
**kwargs: Any,
) -> None:
"""Init params."""
super().__init__(verbose=verbose, **kwargs)
model_kwargs = model_kwargs or _MODEL_KWARGS
self.generate_kwargs = generate_kwargs or _GENERATE_KWARGS
try:
from llama_cpp import Llama
except ImportError:
raise ImportError(_IMPORT_ERROR_MSG)
self.llm = Llama(model_path=model_path, verbose=verbose, **model_kwargs)
self.retriever = retriever
def _run_critic(self, paragraphs: List[str]) -> CriticOutput:
"""
Run the Critic component: the LLM generates a response for each paragraph and then evaluates it.
Args:
paragraphs (List[str]): List of paragraphs to evaluate
Returns:
CriticOutput: Paragraphs final score, LLM predictions and source nodes
"""
paragraphs_final_score = {}
llm_response_text = {}
source_nodes = []
for p_idx, paragraph in enumerate(paragraphs):
pred = self.llm(paragraph, **self.generate_kwargs)
# Cache llm answer
llm_response_text[p_idx] = pred["choices"][0]["text"]
logprobs = pred["choices"][0]["logprobs"]
pred_log_probs = logprobs["top_logprobs"]
# Compute isRel score, on the first predicted token
isRel_score = _relevance_score(pred_log_probs[0])
# Compute isSup score
isSup_score = _is_supported_score(logprobs["tokens"], pred_log_probs)
# Compute isUse score
isUse_score = _is_useful_score(logprobs["tokens"], pred_log_probs)
paragraphs_final_score[p_idx] = (
isRel_score + isSup_score + 0.5 * isUse_score
)
# Add the paragraph as source node with its relevance score
source_nodes.append(
NodeWithScore(
node=TextNode(text=paragraph, id_=str(p_idx)),
score=isRel_score,
)
)
if self.verbose:
print_text(
f"Input: {paragraph}\nPrediction: {llm_response_text[p_idx]}\nScore: {paragraphs_final_score[p_idx]}\n",
color="blue",
)
print_text(
f"{p_idx + 1}/{len(paragraphs)} paragraphs done\n\n", color="blue"
)
return CriticOutput(llm_response_text, paragraphs_final_score, source_nodes)
def custom_query(self, query_str: str) -> Response:
"""Run self-RAG."""
response = self.llm(prompt=_format_prompt(query_str), **_GENERATE_KWARGS)
answer = response["choices"][0]["text"]
source_nodes = []
if "[Retrieval]" in answer:
if self.verbose:
print_text("Retrieval required\n", color="blue")
documents = self.retriever.retrieve(query_str)
if self.verbose:
print_text(f"Received: {len(documents)} documents\n", color="blue")
paragraphs = [
_format_prompt(query_str, document.node.text) for document in documents
]
if self.verbose:
print_text("Start evaluation\n", color="blue")
critic_output = self._run_critic(paragraphs)
paragraphs_final_score = critic_output.paragraphs_final_score
llm_response_per_paragraph = critic_output.llm_response_per_paragraph
source_nodes = critic_output.source_nodes
if self.verbose:
print_text("End evaluation\n", color="blue")
best_paragraph_id = max(
paragraphs_final_score, key=paragraphs_final_score.get
)
answer = llm_response_per_paragraph[best_paragraph_id]
if self.verbose:
print_text(f"Selected the best answer: {answer}\n", color="blue")
answer = _postprocess_answer(answer)
if self.verbose:
print_text(f"Final answer: {answer}\n", color="green")
return Response(response=str(answer), source_nodes=source_nodes)
|
SelfRAGQueryEngine
|
python
|
pypa__pip
|
src/pip/_vendor/rich/rule.py
|
{
"start": 276,
"end": 4598
}
|
class ____(JupyterMixin):
"""A console renderable to draw a horizontal rule (line).
Args:
title (Union[str, Text], optional): Text to render in the rule. Defaults to "".
characters (str, optional): Character(s) used to draw the line. Defaults to "─".
style (StyleType, optional): Style of Rule. Defaults to "rule.line".
end (str, optional): Character at end of Rule. Defaults to "\\n".
align (str, optional): How to align the title, one of "left", "center", or "right". Defaults to "center".
"""
def __init__(
self,
title: Union[str, Text] = "",
*,
characters: str = "─",
style: Union[str, Style] = "rule.line",
end: str = "\n",
align: AlignMethod = "center",
) -> None:
if cell_len(characters) < 1:
raise ValueError(
"'characters' argument must have a cell width of at least 1"
)
if align not in ("left", "center", "right"):
raise ValueError(
f'invalid value for align, expected "left", "center", "right" (not {align!r})'
)
self.title = title
self.characters = characters
self.style = style
self.end = end
self.align = align
def __repr__(self) -> str:
return f"Rule({self.title!r}, {self.characters!r})"
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
width = options.max_width
characters = (
"-"
if (options.ascii_only and not self.characters.isascii())
else self.characters
)
chars_len = cell_len(characters)
if not self.title:
yield self._rule_line(chars_len, width)
return
if isinstance(self.title, Text):
title_text = self.title
else:
title_text = console.render_str(self.title, style="rule.text")
title_text.plain = title_text.plain.replace("\n", " ")
title_text.expand_tabs()
required_space = 4 if self.align == "center" else 2
truncate_width = max(0, width - required_space)
if not truncate_width:
yield self._rule_line(chars_len, width)
return
rule_text = Text(end=self.end)
if self.align == "center":
title_text.truncate(truncate_width, overflow="ellipsis")
side_width = (width - cell_len(title_text.plain)) // 2
left = Text(characters * (side_width // chars_len + 1))
left.truncate(side_width - 1)
right_length = width - cell_len(left.plain) - cell_len(title_text.plain)
right = Text(characters * (side_width // chars_len + 1))
right.truncate(right_length)
rule_text.append(left.plain + " ", self.style)
rule_text.append(title_text)
rule_text.append(" " + right.plain, self.style)
elif self.align == "left":
title_text.truncate(truncate_width, overflow="ellipsis")
rule_text.append(title_text)
rule_text.append(" ")
rule_text.append(characters * (width - rule_text.cell_len), self.style)
elif self.align == "right":
title_text.truncate(truncate_width, overflow="ellipsis")
rule_text.append(characters * (width - title_text.cell_len - 1), self.style)
rule_text.append(" ")
rule_text.append(title_text)
rule_text.plain = set_cell_size(rule_text.plain, width)
yield rule_text
def _rule_line(self, chars_len: int, width: int) -> Text:
rule_text = Text(self.characters * ((width // chars_len) + 1), self.style)
rule_text.truncate(width)
rule_text.plain = set_cell_size(rule_text.plain, width)
return rule_text
def __rich_measure__(
self, console: Console, options: ConsoleOptions
) -> Measurement:
return Measurement(1, 1)
if __name__ == "__main__": # pragma: no cover
import sys
from pip._vendor.rich.console import Console
try:
text = sys.argv[1]
except IndexError:
text = "Hello, World"
console = Console()
console.print(Rule(title=text))
console = Console()
console.print(Rule("foo"), width=4)
|
Rule
|
python
|
tensorflow__tensorflow
|
tensorflow/python/platform/benchmark.py
|
{
"start": 7417,
"end": 10411
}
|
class ____(metaclass=_BenchmarkRegistrar):
"""Abstract class that provides helper functions for running benchmarks.
Any class subclassing this one is immediately registered in the global
benchmark registry.
Only methods whose names start with the word "benchmark" will be run during
benchmarking.
"""
@classmethod
def is_abstract(cls):
# mro: (_BenchmarkRegistrar, Benchmark) means this is Benchmark
return len(cls.mro()) <= 2
def _get_name(self, overwrite_name=None):
"""Returns full name of class and method calling report_benchmark."""
# Find the caller method (outermost Benchmark class)
stack = tf_inspect.stack()
calling_class = None
name = None
for frame in stack[::-1]:
f_locals = frame[0].f_locals
f_self = f_locals.get("self", None)
if isinstance(f_self, Benchmark):
calling_class = f_self # Get the outermost stack Benchmark call
name = frame[3] # Get the method name
break
if calling_class is None:
raise ValueError("Unable to determine calling Benchmark class.")
# Use overwrite_name if provided; otherwise keep the method name.
name = overwrite_name or name
# Prefix the name with the class name.
class_name = type(calling_class).__name__
name = "%s.%s" % (class_name, name)
return name
def report_benchmark(
self,
iters=None,
cpu_time=None,
wall_time=None,
throughput=None,
extras=None,
name=None,
metrics=None):
"""Report a benchmark.
Args:
iters: (optional) How many iterations were run
cpu_time: (optional) Median or mean cpu time in seconds.
wall_time: (optional) Median or mean wall time in seconds.
throughput: (optional) Throughput (in MB/s)
extras: (optional) Dict mapping string keys to additional benchmark info.
Values may be either floats or values that are convertible to strings.
name: (optional) Override the BenchmarkEntry name with `name`.
Otherwise it is inferred from the top-level method name.
metrics: (optional) A list of dict, where each dict has the keys below
name (required), string, metric name
value (required), double, metric value
min_value (optional), double, minimum acceptable metric value
max_value (optional), double, maximum acceptable metric value
"""
name = self._get_name(overwrite_name=name)
_global_report_benchmark(
name=name, iters=iters, cpu_time=cpu_time, wall_time=wall_time,
throughput=throughput, extras=extras, metrics=metrics)
@tf_export("test.benchmark_config")
def benchmark_config():
"""Returns a tf.compat.v1.ConfigProto for disabling the dependency optimizer.
Returns:
A TensorFlow ConfigProto object.
"""
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.dependency_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
@tf_export("test.Benchmark")
|
Benchmark
|
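A minimal subclass sketch using the public `tf.test.Benchmark` alias (the trailing `@tf_export` decorator belongs to that subclass, defined next in the file); only methods whose names start with "benchmark" are run:

```python
import time

import tensorflow as tf


class MatmulBenchmark(tf.test.Benchmark):  # registered on definition
  def benchmark_matmul(self):
    x = tf.random.uniform([512, 512])
    iters = 10
    start = time.time()
    for _ in range(iters):
      tf.linalg.matmul(x, x)
    self.report_benchmark(iters=iters, wall_time=(time.time() - start) / iters)
```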
python
|
getsentry__sentry
|
tests/sentry/workflow_engine/endpoints/validators/test_base_data_condition.py
|
{
"start": 2271,
"end": 2963
}
|
class ____(DataConditionHandler[dict[str, Any]]):
comparison_json_schema = {
"type": "object",
"properties": {
"foo": {
"type": ["string"],
},
},
"required": ["foo"],
"additionalProperties": False,
}
condition_result_schema = {
"type": "object",
"properties": {
"bar": {
"type": ["string"],
},
},
"required": ["bar"],
"additionalProperties": False,
}
@mock.patch(
"sentry.workflow_engine.registry.condition_handler_registry.get",
return_value=MockComplexDataConditionHandler,
)
|
MockComplexDataConditionHandler
|
python
|
numba__numba
|
numba/core/typing/builtins.py
|
{
"start": 16602,
"end": 16933
}
|
class ____(AbstractTemplate):
def generic(self, args, kws):
assert not kws
(val,) = args
if isinstance(val, (types.Buffer, types.BaseTuple)):
return signature(types.intp, val)
elif isinstance(val, (types.RangeType)):
return signature(val.dtype, val)
@infer_global(tuple)
|
Len
|
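This template backs `len()` typing (its own `@infer_global(len)` registration sits just above the span; the trailing `@infer_global(tuple)` belongs to the next template). A small demonstration under `numba.njit`, assuming numba and numpy are installed:

```python
import numpy as np
from numba import njit


@njit
def lengths(arr, tup):
    # Both calls are typed as intp by the template above.
    return len(arr), len(tup)


print(lengths(np.zeros(4), (1, 2, 3)))  # (4, 3)
```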
python
|
PrefectHQ__prefect
|
src/prefect/settings/models/tasks.py
|
{
"start": 1978,
"end": 4005
}
|
class ____(PrefectBaseSettings):
model_config: ClassVar[SettingsConfigDict] = build_settings_config(("tasks",))
refresh_cache: bool = Field(
default=False,
description="If `True`, enables a refresh of cached results: re-executing the task will refresh the cached results.",
)
default_no_cache: bool = Field(
default=False,
description="If `True`, sets the default cache policy on all tasks to `NO_CACHE`.",
)
disable_caching: bool = Field(
default=False,
description="If `True`, disables caching on all tasks regardless of cache policy.",
)
default_retries: int = Field(
default=0,
ge=0,
description="This value sets the default number of retries for all tasks.",
validation_alias=AliasChoices(
AliasPath("default_retries"),
"prefect_tasks_default_retries",
"prefect_task_default_retries",
),
)
default_retry_delay_seconds: TaskRetryDelaySeconds = Field(
default=0,
description="This value sets the default retry delay seconds for all tasks.",
validation_alias=AliasChoices(
AliasPath("default_retry_delay_seconds"),
"prefect_tasks_default_retry_delay_seconds",
"prefect_task_default_retry_delay_seconds",
),
)
default_persist_result: Optional[bool] = Field(
default=None,
description="If `True`, results will be persisted by default for all tasks. Set to `False` to disable persistence by default. "
"Note that setting to `False` will override the behavior set by a parent flow or task.",
)
runner: TasksRunnerSettings = Field(
default_factory=TasksRunnerSettings,
description="Settings for controlling task runner behavior",
)
scheduling: TasksSchedulingSettings = Field(
default_factory=TasksSchedulingSettings,
description="Settings for controlling client-side task scheduling behavior",
)
|
TasksSettings
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1477682,
"end": 1478503
}
|
class ____(sgqlc.types.Type, Node):
"""A Saved Reply is text a user can use to reply quickly."""
__schema__ = github_schema
__field_names__ = ("body", "body_html", "database_id", "title", "user")
body = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="body")
"""The body of the saved reply."""
body_html = sgqlc.types.Field(sgqlc.types.non_null(HTML), graphql_name="bodyHTML")
"""The saved reply body rendered to HTML."""
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
"""Identifies the primary key from the database."""
title = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="title")
"""The title of the saved reply."""
user = sgqlc.types.Field(Actor, graphql_name="user")
"""The user that saved this reply."""
|
SavedReply
|
python
|
doocs__leetcode
|
solution/3600-3699/3667.Sort Array By Absolute Value/Solution.py
|
{
"start": 0,
"end": 131
}
|
class ____:
def sortByAbsoluteValue(self, nums: List[int]) -> List[int]:
return sorted(nums, key=lambda x: abs(x))
|
Solution
|
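A tiny usage check, assuming `Solution` from the row above is in scope (running it standalone also needs `from typing import List`):

```python
sol = Solution()
# Python's sort is stable, so ties on |x| keep their original order.
assert sol.sortByAbsoluteValue([3, -1, 2]) == [-1, 2, 3]
assert sol.sortByAbsoluteValue([-2, 2, 1]) == [1, -2, 2]
```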
python
|
django__django
|
tests/urlpatterns_reverse/tests.py
|
{
"start": 59240,
"end": 63304
}
|
class ____(SimpleTestCase):
def test_urlpattern_resolve(self):
for (
path_,
url_name,
app_name,
namespace,
view_name,
func,
args,
kwargs,
) in resolve_test_data:
with self.subTest(path=path_):
# Legacy support for extracting "function, args, kwargs".
match_func, match_args, match_kwargs = resolve(path_)
self.assertEqual(match_func, func)
self.assertEqual(match_args, args)
self.assertEqual(match_kwargs, kwargs)
# ResolverMatch capabilities.
match = resolve(path_)
self.assertEqual(match.__class__, ResolverMatch)
self.assertEqual(match.url_name, url_name)
self.assertEqual(match.app_name, app_name)
self.assertEqual(match.namespace, namespace)
self.assertEqual(match.view_name, view_name)
self.assertEqual(match.func, func)
self.assertEqual(match.args, args)
self.assertEqual(match.kwargs, kwargs)
# and for legacy purposes:
self.assertEqual(match[0], func)
self.assertEqual(match[1], args)
self.assertEqual(match[2], kwargs)
def test_resolver_match_on_request(self):
response = self.client.get("/resolver_match/")
resolver_match = response.resolver_match
self.assertEqual(resolver_match.url_name, "test-resolver-match")
def test_resolver_match_on_request_before_resolution(self):
request = HttpRequest()
self.assertIsNone(request.resolver_match)
def test_repr(self):
self.assertEqual(
repr(resolve("/no_kwargs/42/37/")),
"ResolverMatch(func=urlpatterns_reverse.views.empty_view, "
"args=('42', '37'), kwargs={}, url_name='no-kwargs', app_names=[], "
"namespaces=[], route='^no_kwargs/([0-9]+)/([0-9]+)/$')",
)
def test_repr_extra_kwargs(self):
self.assertEqual(
repr(resolve("/mixed_args/1986/11/")),
"ResolverMatch(func=urlpatterns_reverse.views.empty_view, args=(), "
"kwargs={'arg2': '11', 'extra': True}, url_name='mixed-args', "
"app_names=[], namespaces=[], "
"route='^mixed_args/([0-9]+)/(?P<arg2>[0-9]+)/$', "
"captured_kwargs={'arg2': '11'}, extra_kwargs={'extra': True})",
)
@override_settings(ROOT_URLCONF="urlpatterns_reverse.reverse_lazy_urls")
def test_classbased_repr(self):
self.assertEqual(
repr(resolve("/redirect/")),
"ResolverMatch(func=urlpatterns_reverse.views.LazyRedirectView, "
"args=(), kwargs={}, url_name=None, app_names=[], "
"namespaces=[], route='redirect/')",
)
@override_settings(ROOT_URLCONF="urlpatterns_reverse.urls")
def test_repr_functools_partial(self):
tests = [
("partial", "template.html"),
("partial_nested", "nested_partial.html"),
("partial_wrapped", "template.html"),
]
for name, template_name in tests:
with self.subTest(name=name):
func = (
f"functools.partial({views.empty_view!r}, "
f"template_name='{template_name}')"
)
self.assertEqual(
repr(resolve(f"/{name}/")),
f"ResolverMatch(func={func}, args=(), kwargs={{}}, "
f"url_name='{name}', app_names=[], namespaces=[], "
f"route='{name}/')",
)
@override_settings(ROOT_URLCONF="urlpatterns.path_urls")
def test_pickling(self):
msg = "Cannot pickle ResolverMatch."
with self.assertRaisesMessage(pickle.PicklingError, msg):
pickle.dumps(resolve("/users/"))
@override_settings(ROOT_URLCONF="urlpatterns_reverse.erroneous_urls")
|
ResolverMatchTests
|
python
|
scipy__scipy
|
benchmarks/benchmarks/go_benchmark_functions/go_funcs_D.py
|
{
"start": 15583,
"end": 16874
}
|
class ____(Benchmark):
r"""
Dolan objective function.
This class defines the Dolan [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Dolan}}(x) = \lvert (x_1 + 1.7 x_2)\sin(x_1) - 1.5 x_3
- 0.1 x_4\cos(x_4 + x_5 - x_1) + 0.2 x_5^2 - x_2 - 1 \rvert
with :math:`x_i \in [-100, 100]` for :math:`i = 1, ..., 5`.
*Global optimum*: :math:`f(x_i) = 10^{-5}` for
:math:`x = [8.39045925, 4.81424707, 7.34574133, 68.88246895, 3.85470806]`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
TODO Jamil equation is missing the absolute brackets around the entire
expression.
"""
def __init__(self, dimensions=5):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-100.0] * self.N,
[100.0] * self.N))
self.global_optimum = [[-74.10522498, 44.33511286, 6.21069214,
18.42772233, -16.5839403]]
self.fglob = 0
def fun(self, x, *args):
self.nfev += 1
return (abs((x[0] + 1.7 * x[1]) * sin(x[0]) - 1.5 * x[2]
- 0.1 * x[3] * cos(x[3] + x[4] - x[0]) + 0.2 * x[4] ** 2
- x[1] - 1))
|
Dolan
|
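A standalone numerical check of the Dolan expression, independent of the Benchmark scaffolding; the candidate point is the optimum quoted in the docstring:

```python
import numpy as np

x = np.array([8.39045925, 4.81424707, 7.34574133, 68.88246895, 3.85470806])
f = abs((x[0] + 1.7 * x[1]) * np.sin(x[0]) - 1.5 * x[2]
        - 0.1 * x[3] * np.cos(x[3] + x[4] - x[0]) + 0.2 * x[4] ** 2
        - x[1] - 1)
print(f)  # the docstring quotes ~1e-5 at this point
```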
python
|
walkccc__LeetCode
|
solutions/642. Design Search Autocomplete System/642.py
|
{
"start": 455,
"end": 1365
}
|
class ____:
def __init__(self, sentences: list[str], times: list[int]):
self.root = TrieNode()
self.curr = self.root
self.s: list[str] = []
for sentence, time in zip(sentences, times):
self._insert(sentence, time)
def input(self, c: str) -> list[str]:
if c == '#':
self._insert(''.join(self.s), 1)
self.curr = self.root
self.s = []
return []
self.s.append(c)
if self.curr:
self.curr = self.curr.children.get(c, None)
if not self.curr:
return []
return [node.s for node in self.curr.top3]
def _insert(self, sentence: str, time: int) -> None:
node = self.root
for c in sentence:
node = node.children.setdefault(c, TrieNode())
node.s = sentence
node.time += time
leaf = node
node: TrieNode = self.root
for c in sentence:
node = node.children[c]
node.update(leaf)
|
AutocompleteSystem
|
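The span starts after the `TrieNode` helper this row depends on; a minimal compatible sketch, reconstructed from the usage above (hypothetical, not the repository's exact definition):

```python
class TrieNode:
    def __init__(self) -> None:
        self.children: dict[str, 'TrieNode'] = {}
        self.s: str = ''          # the full sentence, set on the leaf node
        self.time = 0             # cumulative frequency of that sentence
        self.top3: list['TrieNode'] = []  # hottest leaves under this prefix

    def update(self, leaf: 'TrieNode') -> None:
        # Keep the three best leaves: higher time first, then ASCII order.
        if leaf not in self.top3:
            self.top3.append(leaf)
        self.top3.sort(key=lambda node: (-node.time, node.s))
        self.top3 = self.top3[:3]
```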
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/ragged/row_partition_test.py
|
{
"start": 1953,
"end": 37707
}
|
class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
# =============================================================================
# RowPartition class docstring examples
# =============================================================================
def testClassDocStringExamples(self):
# From section: "Component Tensors"
rp = RowPartition.from_row_splits(row_splits=[0, 4, 4, 7, 8, 8])
self.assertAllEqual(rp.row_splits(), [0, 4, 4, 7, 8, 8])
del rp
# From section: "Alternative Row-Partitioning Schemes"
rt1 = RowPartition.from_row_splits(row_splits=[0, 4, 4, 7, 8, 8])
rt2 = RowPartition.from_row_lengths(row_lengths=[4, 0, 3, 1, 0])
rt3 = RowPartition.from_value_rowids(
value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5)
rt4 = RowPartition.from_row_starts(row_starts=[0, 4, 4, 7, 8], nvals=8)
rt5 = RowPartition.from_row_limits(row_limits=[4, 4, 7, 8, 8])
for rp in (rt1, rt2, rt3, rt4, rt5):
self.assertAllEqual(rp.row_splits(), [0, 4, 4, 7, 8, 8])
del rt1, rt2, rt3, rt4, rt5
# From section: "Multiple Ragged Dimensions"
inner_rt = RowPartition.from_row_splits(row_splits=[0, 4, 4, 7, 8, 8])
outer_rt = RowPartition.from_row_splits(row_splits=[0, 3, 3, 5])
del inner_rt, outer_rt
# =============================================================================
# RowPartition Constructor (private)
# =============================================================================
def testRowPartitionConstruction(self):
row_splits = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64)
rp = RowPartition(
row_splits=row_splits,
internal=row_partition._row_partition_factory_key)
self.assertAllEqual(rp.row_splits(), [0, 2, 2, 5, 6, 7])
def testRowPartitionConstructionErrors(self):
row_splits = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64)
with self.assertRaisesRegex(ValueError,
'RowPartition constructor is private'):
RowPartition(row_splits=row_splits)
with self.assertRaisesRegex(TypeError,
'Row-partitioning argument must be a Tensor'):
RowPartition(
row_splits=[0, 2, 2, 5, 6, 7],
internal=row_partition._row_partition_factory_key)
with self.assertRaisesRegex(ValueError, r'Shape \(6, 1\) must have rank 1'):
RowPartition(
row_splits=array_ops.expand_dims(row_splits, 1),
internal=row_partition._row_partition_factory_key)
with self.assertRaisesRegex(TypeError,
'Cached value must be a Tensor or None.'):
RowPartition(
row_splits=row_splits,
row_lengths=[2, 3, 4],
internal=row_partition._row_partition_factory_key)
with self.assertRaisesRegex(ValueError, 'Inconsistent dtype'):
RowPartition(
row_splits=constant_op.constant([0, 3], dtypes.int64),
nrows=constant_op.constant(1, dtypes.int32),
internal=row_partition._row_partition_factory_key)
# =============================================================================
# RowPartition Factory Ops
# =============================================================================
def testFromValueRowIdsWithDerivedNRows(self):
# nrows is known at graph creation time.
value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
# TODO(martinz): add nrows
rp = RowPartition.from_value_rowids(value_rowids, validate=False)
self.assertEqual(rp.dtype, dtypes.int64)
rp_row_splits = rp.row_splits()
rp_value_rowids = rp.value_rowids()
rp_nrows = rp.nrows()
self.assertIs(rp_value_rowids, value_rowids) # value_rowids
self.assertAllEqual(rp_value_rowids, value_rowids)
self.assertAllEqual(rp_nrows, 5)
self.assertAllEqual(rp_row_splits, [0, 2, 2, 5, 6, 7])
def testFromValueRowIdsWithDerivedNRowsDynamic(self):
# nrows is not known at graph creation time.
value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
value_rowids = array_ops.placeholder_with_default(value_rowids, shape=None)
rp = RowPartition.from_value_rowids(value_rowids, validate=False)
rp_value_rowids = rp.value_rowids()
rp_nrows = rp.nrows()
self.assertIs(rp_value_rowids, value_rowids) # value_rowids
self.assertAllEqual(rp_value_rowids, value_rowids)
self.assertAllEqual(rp_nrows, 5)
def testFromValueRowIdsWithExplicitNRows(self):
value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
nrows = constant_op.constant(7, dtypes.int64)
rp = RowPartition.from_value_rowids(value_rowids, nrows, validate=False)
rp_value_rowids = rp.value_rowids()
rp_nrows = rp.nrows()
rp_row_splits = rp.row_splits()
self.assertIs(rp_value_rowids, value_rowids) # value_rowids
self.assertIs(rp_nrows, nrows) # nrows
self.assertAllEqual(rp_row_splits, [0, 2, 2, 5, 6, 7, 7, 7])
def testFromValueRowIdsWithExplicitNRowsEqualToDefault(self):
value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
nrows = constant_op.constant(5, dtypes.int64)
rp = RowPartition.from_value_rowids(value_rowids, nrows, validate=False)
rp_value_rowids = rp.value_rowids()
rp_nrows = rp.nrows()
rp_row_splits = rp.row_splits()
self.assertIs(rp_value_rowids, value_rowids) # value_rowids
self.assertIs(rp_nrows, nrows) # nrows
self.assertAllEqual(rp_value_rowids, value_rowids)
self.assertAllEqual(rp_nrows, nrows)
self.assertAllEqual(rp_row_splits, [0, 2, 2, 5, 6, 7])
def testFromValueRowIdsWithEmptyValues(self):
rp = RowPartition.from_value_rowids([])
rp_nrows = rp.nrows()
self.assertEqual(rp.dtype, dtypes.int64)
self.assertEqual(rp.value_rowids().shape.as_list(), [0])
self.assertAllEqual(rp_nrows, 0)
def testFromRowSplits(self):
row_splits = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64)
rp = RowPartition.from_row_splits(row_splits, validate=False)
self.assertEqual(rp.dtype, dtypes.int64)
rp_row_splits = rp.row_splits()
rp_nrows = rp.nrows()
self.assertIs(rp_row_splits, row_splits)
self.assertAllEqual(rp_nrows, 5)
def testFromRowSplitsWithDifferentSplitTypes(self):
splits1 = [0, 2, 2, 5, 6, 7]
splits2 = np.array([0, 2, 2, 5, 6, 7], np.int64)
splits3 = np.array([0, 2, 2, 5, 6, 7], np.int32)
splits4 = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64)
splits5 = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int32)
rt1 = RowPartition.from_row_splits(splits1)
rt2 = RowPartition.from_row_splits(splits2)
rt3 = RowPartition.from_row_splits(splits3)
rt4 = RowPartition.from_row_splits(splits4)
rt5 = RowPartition.from_row_splits(splits5)
self.assertEqual(rt1.row_splits().dtype, dtypes.int64)
self.assertEqual(rt2.row_splits().dtype, dtypes.int64)
self.assertEqual(rt3.row_splits().dtype, dtypes.int32)
self.assertEqual(rt4.row_splits().dtype, dtypes.int64)
self.assertEqual(rt5.row_splits().dtype, dtypes.int32)
def testFromRowSplitsWithEmptySplits(self):
err_msg = 'row_splits tensor may not be empty'
with self.assertRaisesRegex(ValueError, err_msg):
RowPartition.from_row_splits([])
def testFromRowStarts(self):
nvals = constant_op.constant(7)
row_starts = constant_op.constant([0, 2, 2, 5, 6], dtypes.int64)
rp = RowPartition.from_row_starts(row_starts, nvals, validate=False)
self.assertEqual(rp.dtype, dtypes.int64)
rp_row_starts = rp.row_starts()
rp_row_splits = rp.row_splits()
rp_nrows = rp.nrows()
self.assertAllEqual(rp_nrows, 5)
self.assertAllEqual(rp_row_starts, row_starts)
self.assertAllEqual(rp_row_splits, [0, 2, 2, 5, 6, 7])
def testFromRowLimits(self):
row_limits = constant_op.constant([2, 2, 5, 6, 7], dtypes.int64)
rp = RowPartition.from_row_limits(row_limits, validate=False)
self.assertEqual(rp.dtype, dtypes.int64)
rp_row_limits = rp.row_limits()
rp_row_splits = rp.row_splits()
rp_nrows = rp.nrows()
self.assertAllEqual(rp_nrows, 5)
self.assertAllEqual(rp_row_limits, row_limits)
self.assertAllEqual(rp_row_splits, [0, 2, 2, 5, 6, 7])
def testFromRowLengths(self):
row_lengths = constant_op.constant([2, 0, 3, 1, 1], dtypes.int64)
rp = RowPartition.from_row_lengths(row_lengths, validate=False)
self.assertEqual(rp.dtype, dtypes.int64)
rp_row_lengths = rp.row_lengths()
rp_nrows = rp.nrows()
self.assertIs(rp_row_lengths, row_lengths) # nrows
self.assertAllEqual(rp_nrows, 5)
self.assertAllEqual(rp_row_lengths, row_lengths)
def testFromUniformRowLength(self):
nvals = 16
a1 = RowPartition.from_uniform_row_length(
nvals=nvals, uniform_row_length=2)
self.assertAllEqual(a1.uniform_row_length(), 2)
self.assertAllEqual(a1.nrows(), 8)
def testFromUniformRowLengthWithEmptyValues(self):
a = RowPartition.from_uniform_row_length(
nvals=0, uniform_row_length=0, nrows=10)
self.assertEqual(self.evaluate(a.nvals()), 0)
self.assertEqual(self.evaluate(a.nrows()), 10)
def testFromUniformRowLengthWithPlaceholders1(self):
nvals = array_ops.placeholder_with_default(
constant_op.constant(6, dtype=dtypes.int64), None)
rt1 = RowPartition.from_uniform_row_length(
nvals=nvals, uniform_row_length=3)
const_nvals1 = self.evaluate(rt1.nvals())
self.assertEqual(const_nvals1, 6)
def testFromUniformRowLengthWithPlaceholders2(self):
nvals = array_ops.placeholder_with_default(6, None)
ph_rowlen = array_ops.placeholder_with_default(3, None)
rt2 = RowPartition.from_uniform_row_length(
nvals=nvals, uniform_row_length=ph_rowlen)
const_nvals2 = self.evaluate(rt2.nvals())
self.assertEqual(const_nvals2, 6)
def testFromValueRowIdsWithBadNRows(self):
value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
nrows = constant_op.constant(5, dtypes.int64)
with self.assertRaisesRegex(ValueError, r'Expected nrows >= 0; got -2'):
RowPartition.from_value_rowids(
value_rowids=array_ops.placeholder_with_default(value_rowids, None),
nrows=-2)
with self.assertRaisesRegex(
ValueError, r'Expected nrows >= value_rowids\[-1\] \+ 1; got nrows=2, '
r'value_rowids\[-1\]=4'):
RowPartition.from_value_rowids(value_rowids=value_rowids, nrows=2)
with self.assertRaisesRegex(
ValueError, r'Expected nrows >= value_rowids\[-1\] \+ 1; got nrows=4, '
r'value_rowids\[-1\]=4'):
RowPartition.from_value_rowids(value_rowids=value_rowids, nrows=4)
with self.assertRaisesRegex(ValueError, r'Shape \(7, 1\) must have rank 1'):
RowPartition.from_value_rowids(
value_rowids=array_ops.expand_dims(value_rowids, 1), nrows=nrows)
with self.assertRaisesRegex(ValueError, r'Shape \(1,\) must have rank 0'):
RowPartition.from_value_rowids(
value_rowids=value_rowids, nrows=array_ops.expand_dims(nrows, 0))
# =============================================================================
# RowPartition.__str__
# =============================================================================
def testRowPartitionStr(self):
row_splits = [0, 2, 5, 6, 6, 7]
rp = RowPartition.from_row_splits(row_splits, validate=False)
if context.executing_eagerly():
expected_repr = 'tf.RowPartition(row_splits=[0 2 5 6 6 7])'
else:
expected_repr = ('tf.RowPartition(row_splits='
'Tensor("RowPartitionFromRowSplits/row_splits:0", '
'shape=(6,), dtype=int64))')
self.assertEqual(repr(rp), expected_repr)
self.assertEqual(str(rp), expected_repr)
def testRowPartitionStrUniformRowLength(self):
rp = RowPartition.from_uniform_row_length(5, nvals=10, nrows=2)
if context.executing_eagerly():
expected_repr = ('tf.RowPartition(nrows=2, uniform_row_length=5)')
else:
expected_repr = (
'tf.RowPartition(nrows='
'Tensor("RowPartitionFromUniformRowLength/'
'nrows:0", shape=(), dtype=int64), '
'uniform_row_length=Tensor("RowPartitionFromUniformRowLength/'
'uniform_row_length:0", shape=(), dtype=int64))')
self.assertEqual(repr(rp), expected_repr)
self.assertEqual(str(rp), expected_repr)
@parameterized.parameters([
# from_value_rowids
{
'descr': 'bad rank for value_rowids',
'factory': RowPartition.from_value_rowids,
'value_rowids': [[1, 2], [3, 4]],
'nrows': 10,
},
{
'descr': 'bad rank for nrows',
'factory': RowPartition.from_value_rowids,
'value_rowids': [1, 2, 3, 4],
'nrows': [10],
},
{
'descr': 'negative value_rowid',
'factory': RowPartition.from_value_rowids,
'value_rowids': [-5, 2, 3, 4],
'nrows': 10,
},
{
'descr': 'non-monotonic-increasing value_rowid',
'factory': RowPartition.from_value_rowids,
'value_rowids': [4, 3, 2, 1],
'nrows': 10,
},
{
'descr': 'value_rowid > nrows',
'factory': RowPartition.from_value_rowids,
'value_rowids': [1, 2, 3, 4],
'nrows': 2,
},
# from_row_splits
{
'descr': 'bad rank for row_splits',
'factory': RowPartition.from_row_splits,
'row_splits': [[1, 2], [3, 4]],
},
{
'descr': 'row_splits[0] != 0',
'factory': RowPartition.from_row_splits,
'row_splits': [2, 3, 4],
},
{
'descr': 'non-monotonic-increasing row_splits',
'factory': RowPartition.from_row_splits,
'row_splits': [0, 3, 2, 4],
},
# from_row_lengths
{
'descr': 'bad rank for row_lengths',
'factory': RowPartition.from_row_lengths,
'row_lengths': [[1, 2], [1, 0]],
},
{
'descr': 'negative row_lengths',
'factory': RowPartition.from_row_lengths,
'row_lengths': [3, -1, 2],
},
# from_row_starts
{
'descr': 'bad rank for row_starts',
'factory': RowPartition.from_row_starts,
'nvals': 2,
'row_starts': [[1, 2], [3, 4]],
},
{
'descr': 'row_starts[0] != 0',
'factory': RowPartition.from_row_starts,
'nvals': 5,
'row_starts': [2, 3, 4],
},
{
'descr': 'non-monotonic-increasing row_starts',
'factory': RowPartition.from_row_starts,
'nvals': 4,
'row_starts': [0, 3, 2, 4],
},
{
'descr': 'row_starts[0] > nvals',
'factory': RowPartition.from_row_starts,
'nvals': 4,
'row_starts': [0, 2, 3, 5],
},
# from_row_limits
{
'descr': 'bad rank for row_limits',
'factory': RowPartition.from_row_limits,
'row_limits': [[1, 2], [3, 4]],
},
{
'descr': 'row_limits[0] < 0',
'factory': RowPartition.from_row_limits,
'row_limits': [-1, 3, 4],
},
{
'descr': 'non-monotonic-increasing row_limits',
'factory': RowPartition.from_row_limits,
'row_limits': [0, 3, 2, 4],
},
# from_uniform_row_length
{
'descr': 'rowlen * nrows != nvals (1)',
'factory': RowPartition.from_uniform_row_length,
'nvals': 5,
'uniform_row_length': 3,
},
{
'descr': 'rowlen * nrows != nvals (2)',
'factory': RowPartition.from_uniform_row_length,
'nvals': 5,
'uniform_row_length': 6,
},
{
'descr': 'rowlen * nrows != nvals (3)',
'factory': RowPartition.from_uniform_row_length,
'nvals': 6,
'uniform_row_length': 3,
'nrows': 3,
},
{
'descr': 'rowlen must be a scalar',
'factory': RowPartition.from_uniform_row_length,
'nvals': 4,
'uniform_row_length': [2],
},
{
'descr': 'rowlen must be nonnegative',
'factory': RowPartition.from_uniform_row_length,
'nvals': 4,
'uniform_row_length': -1,
},
])
def testFactoryValidation(self, descr, factory, **kwargs):
# When input tensors have shape information, some of these errors will be
# detected statically.
with self.assertRaises((errors.InvalidArgumentError, ValueError)):
partition = factory(**kwargs)
self.evaluate(partition.row_splits())
# Remove shape information (by wrapping tensors in placeholders), and check
# that we detect the errors when the graph is run.
if not context.executing_eagerly():
def wrap_arg(v):
return array_ops.placeholder_with_default(
constant_op.constant(v, dtype=dtypes.int64),
tensor_shape.TensorShape(None))
kwargs = dict((k, wrap_arg(v)) for (k, v) in kwargs.items())
with self.assertRaises(errors.InvalidArgumentError):
partition = factory(**kwargs)
self.evaluate(partition.row_splits())
@parameterized.named_parameters([
('FromRowSplits', lambda: RowPartition.from_row_splits([0, 2, 8]),
['row_splits']),
('FromRowLengths', lambda: RowPartition.from_row_lengths([3, 0, 8]),
['row_splits', 'row_lengths']),
('FromValueRowIds',
lambda: RowPartition.from_value_rowids([0, 0, 3, 4, 4, 4]),
['row_splits', 'value_rowids', 'row_lengths', 'nrows']),
('FromRowStarts',
lambda: RowPartition.from_row_starts([0, 3, 7], nvals=10),
['row_splits']),
('FromRowLimits', lambda: RowPartition.from_row_limits([3, 7, 10]),
['row_splits']),
])
def testPrecomputedSplits(self, rp_factory, expected_encodings):
rp = rp_factory()
self.assertEqual(rp._has_precomputed_row_splits(),
'row_splits' in expected_encodings)
self.assertEqual(rp._has_precomputed_row_lengths(),
'row_lengths' in expected_encodings)
self.assertEqual(rp._has_precomputed_value_rowids(),
'value_rowids' in expected_encodings)
self.assertEqual(rp._has_precomputed_nrows(), 'nrows' in expected_encodings)
def testWithPrecomputedSplits(self):
rp = RowPartition.from_row_splits([0, 2, 8])
rp_with_row_splits = rp._with_precomputed_row_splits()
self.assertTrue(rp_with_row_splits._has_precomputed_row_splits())
self.assertFalse(rp._has_precomputed_row_lengths())
rp_with_row_lengths = rp._with_precomputed_row_lengths()
self.assertTrue(rp_with_row_lengths._has_precomputed_row_lengths())
self.assertFalse(rp._has_precomputed_value_rowids())
rp_with_value_rowids = rp._with_precomputed_value_rowids()
self.assertTrue(rp_with_value_rowids._has_precomputed_value_rowids())
self.assertFalse(rp._has_precomputed_nrows())
rp_with_nrows = rp._with_precomputed_nrows()
self.assertTrue(rp_with_nrows._has_precomputed_nrows())
self.assertFalse(rp._has_precomputed_nvals())
rp_with_nvals = rp._with_precomputed_nvals()
self.assertTrue(rp_with_nvals._has_precomputed_nvals())
@parameterized.named_parameters([
dict(
testcase_name='FromRowSplitsAndRowSplits',
x=lambda: RowPartition.from_row_splits([0, 3, 8]),
y=lambda: RowPartition.from_row_splits([0, 3, 8]),
expected_encodings=['row_splits']),
dict(
testcase_name='FromRowSplitsAndUniformRowLength',
x=lambda: RowPartition.from_row_splits([0, 3, 6]),
y=lambda: RowPartition.from_uniform_row_length(3, nvals=6),
expected_encodings=['row_splits', 'uniform_row_length', 'nrows']),
dict(
testcase_name='FromRowSplitsAndRowLengths',
x=lambda: RowPartition.from_row_splits([0, 3, 8]),
y=lambda: RowPartition.from_row_lengths([3, 5]),
expected_encodings=['row_splits', 'row_lengths']),
dict(
testcase_name='FromRowSplitsAndValueRowIds',
x=lambda: RowPartition.from_row_splits([0, 3, 8]),
y=lambda: RowPartition.from_value_rowids([0, 0, 0, 1, 1, 1, 1, 1]),
expected_encodings=[
'row_splits', 'row_lengths', 'value_rowids', 'nrows'
]),
dict(
testcase_name='FromRowSplitsAndRowSplitsPlusNRows',
x=lambda: RowPartition.from_row_splits([0, 3, 8]),
y=lambda: RowPartition.from_row_splits([0, 3, 8]).
_with_precomputed_nrows(),
expected_encodings=['row_splits', 'nrows']),
])
def testMergePrecomputedEncodings(self, x, y, expected_encodings):
x = x()
y = y()
for validate in (True, False):
result = x._merge_precomputed_encodings(y, validate)
self.assertEqual(result._has_precomputed_row_splits(),
'row_splits' in expected_encodings)
self.assertEqual(result._has_precomputed_row_lengths(),
'row_lengths' in expected_encodings)
self.assertEqual(result._has_precomputed_value_rowids(),
'value_rowids' in expected_encodings)
self.assertEqual(result._has_precomputed_nrows(),
'nrows' in expected_encodings)
self.assertEqual(result.uniform_row_length() is not None,
'uniform_row_length' in expected_encodings)
for r in (x, y):
if (r._has_precomputed_row_splits() and
result._has_precomputed_row_splits()):
self.assertAllEqual(r.row_splits(), result.row_splits())
if (r._has_precomputed_row_lengths() and
result._has_precomputed_row_lengths()):
self.assertAllEqual(r.row_lengths(), result.row_lengths())
if (r._has_precomputed_value_rowids() and
result._has_precomputed_value_rowids()):
self.assertAllEqual(r.value_rowids(), result.value_rowids())
if r._has_precomputed_nrows() and result._has_precomputed_nrows():
self.assertAllEqual(r.nrows(), result.nrows())
if (r.uniform_row_length() is not None and
result.uniform_row_length() is not None):
self.assertAllEqual(r.uniform_row_length(),
result.uniform_row_length())
def testMergePrecomputedEncodingsFastPaths(self):
# Same object: x gets returned as-is.
x = RowPartition.from_row_splits([0, 3, 8, 8])
self.assertIs(x._merge_precomputed_encodings(x), x)
# Same encoding tensor objects: x gets returned as-is.
y = RowPartition.from_row_splits(x.row_splits(), validate=False)
self.assertIs(x._merge_precomputed_encodings(y), x)
def testMergePrecomputedEncodingsWithMatchingTensors(self):
# The encoding tensors for `a` are a superset of the encoding tensors
# for `b`, and where they overlap, they are the same tensor objects.
a = RowPartition.from_value_rowids([0, 0, 3, 4, 4, 4])
b = RowPartition.from_row_splits(a.row_splits(), validate=False)
self.assertIs(a._merge_precomputed_encodings(b), a)
self.assertIs(b._merge_precomputed_encodings(a), a)
self.assertIsNot(a, b)
@parameterized.named_parameters([
dict(
testcase_name='RowSplitMismatch',
x=lambda: RowPartition.from_row_splits([0, 3, 8]),
y=lambda: RowPartition.from_row_splits([0, 3, 8, 9]),
message='incompatible row_splits'),
dict(
testcase_name='RowLengthMismatch',
x=lambda: RowPartition.from_row_lengths([2, 0, 2]),
y=lambda: RowPartition.from_row_lengths([2, 0, 2, 1]),
message='incompatible row_splits'), # row_splits is checked first
dict(
testcase_name='ValueRowIdMismatch',
x=lambda: RowPartition.from_value_rowids([0, 3, 3, 4]),
y=lambda: RowPartition.from_value_rowids([0, 3, 4]),
message='incompatible value_rowids'),
])
def testMergePrecomputedEncodingStaticErrors(self, x, y, message):
if context.executing_eagerly():
return
# Errors that are caught by static shape checks.
x = x()
y = y()
with self.assertRaisesRegex(ValueError, message):
x._merge_precomputed_encodings(y).row_splits()
with self.assertRaisesRegex(ValueError, message):
y._merge_precomputed_encodings(x).row_splits()
@parameterized.named_parameters([
dict(
testcase_name='NRowsMismatchAlt',
x=lambda: RowPartition.from_uniform_row_length(5, nrows=4, nvals=20),
y=lambda: RowPartition.from_uniform_row_length(5, nrows=3, nvals=15),
message='incompatible nrows'),
dict(
testcase_name='UniformRowLengthMismatch',
x=lambda: RowPartition.from_uniform_row_length(5, nvals=20),
y=lambda: RowPartition.from_uniform_row_length(2, nvals=8),
message='incompatible (nvals|uniform_row_length)'),
dict(
testcase_name='RowSplitMismatch',
x=lambda: RowPartition.from_row_splits([0, 3, 8]),
y=lambda: RowPartition.from_row_splits([0, 5, 8]),
message='incompatible row_splits'),
dict(
testcase_name='RowLengthMismatch',
x=lambda: RowPartition.from_row_lengths([2, 0, 2]),
y=lambda: RowPartition.from_row_lengths([0, 0, 2]),
message='incompatible (row_splits|nvals)'),
dict(
testcase_name='ValueRowIdMismatch',
x=lambda: RowPartition.from_value_rowids([0, 3, 3]),
y=lambda: RowPartition.from_value_rowids([0, 0, 3]),
message='incompatible row_splits'), # row_splits is checked first
])
def testMergePrecomputedEncodingRuntimeErrors(self, x, y, message):
# Errors that are caught by runtime value checks.
x = x()
y = y()
with self.assertRaisesRegex(errors.InvalidArgumentError, message):
self.evaluate(x._merge_precomputed_encodings(y).row_splits())
with self.assertRaisesRegex(errors.InvalidArgumentError, message):
self.evaluate(y._merge_precomputed_encodings(x).row_splits())
@parameterized.named_parameters([
# The right error type is raised, but the expected message differs between eager and graph mode.
dict(
testcase_name='NRowsMismatch',
x=lambda: RowPartition.from_uniform_row_length(5, nvals=20),
y=lambda: RowPartition.from_uniform_row_length(5, nvals=15),
message='incompatible nvals',
emessage='incompatible nrows'),
])
def testMergePrecomputedEncodingStaticErrors2(self, x, y,
message, emessage):
# The expected error message varies depending on whether execution is eager.
x = x()
y = y()
error_type = errors_impl.InvalidArgumentError
expected_message = emessage if context.executing_eagerly() else message
with self.assertRaisesRegex(error_type, expected_message):
self.evaluate(x._merge_precomputed_encodings(y).row_splits())
with self.assertRaisesRegex(error_type, expected_message):
self.evaluate(y._merge_precomputed_encodings(x).row_splits())
@parameterized.named_parameters([
dict(
testcase_name='NoneSpecified',
rp=(lambda: RowPartition.from_row_splits([0, 3, 8])),
spec=RowPartitionSpec(nrows=None, nvals=None, dtype=dtypes.int64)),
dict(
testcase_name='NRowsSpecified',
rp=(lambda: RowPartition.from_row_splits([0, 3, 8])),
spec=RowPartitionSpec(nrows=2, nvals=None, dtype=dtypes.int64)),
dict(
testcase_name='NValsSpecified',
rp=_get_specified_row_partition,
spec=RowPartitionSpec(nrows=None, nvals=8, dtype=dtypes.int64))
])
def testMergeWithSpecNoop(self, rp, spec):
rp = rp()
actual = rp._merge_with_spec(spec)
self.assertAllEqual(actual.row_splits(), rp.row_splits())
self.assertAllEqual(actual.static_nrows, rp.static_nrows)
self.assertAllEqual(actual.static_nvals, rp.static_nvals)
@parameterized.named_parameters([
dict(
testcase_name='NRowsNValsUpdated',
rp=(lambda: RowPartition.from_row_splits([0, 3, 8])),
spec=RowPartitionSpec(nrows=2, nvals=8, dtype=dtypes.int64),
expected=_get_specified_row_partition),
dict(
testcase_name='NValsUpdated',
rp=(lambda: RowPartition.from_row_splits([0, 3, 8])),
spec=RowPartitionSpec(nrows=None, nvals=8, dtype=dtypes.int64),
expected=_get_specified_row_partition)])
def testMergeWithSpecUpdate(self, rp, spec, expected):
rp = rp()
expected = expected()
actual = rp._merge_with_spec(spec)
self.assertAllEqual(actual.row_splits(), expected.row_splits())
self.assertAllEqual(actual.static_nrows, expected.static_nrows)
self.assertAllEqual(actual.static_nvals, expected.static_nvals)
@parameterized.named_parameters([
dict(
testcase_name='from_uniform_row_length',
x=lambda: RowPartition.from_uniform_row_length(5, nvals=20),
expected=True),
dict(
testcase_name='from_row_splits',
x=lambda: RowPartition.from_row_splits([0, 3, 8]),
expected=False),
dict(
testcase_name='from_row_lengths',
x=lambda: RowPartition.from_row_lengths([2, 0, 2]),
expected=False),
dict(
testcase_name='from_row_lengths_uniform',
x=lambda: RowPartition.from_row_lengths([3, 3, 3]),
expected=False),
])
def testIsUniform(self, x, expected):
x = x()
self.assertEqual(expected, x.is_uniform())
@parameterized.named_parameters([
dict(
testcase_name='doc_example',
x=lambda: RowPartition.from_row_lengths([3, 2, 0, 2]),
expected=[0, 1, 2, 0, 1, 0, 1]),
dict(
testcase_name='from_uniform_row_length',
x=lambda: RowPartition.from_uniform_row_length(4, nvals=12),
expected=[0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]),
dict(
testcase_name='from_row_splits',
x=lambda: RowPartition.from_row_splits([0, 3, 8]),
expected=[0, 1, 2, 0, 1, 2, 3, 4]),
])
def testOffsetsInRows(self, x, expected):
x = x()
actual = x.offsets_in_rows()
self.assertAllEqual(expected, actual)
def testFromUniformRowLengthBugConvertToTensor(self):
# This originally failed to run because nrows was dtypes.int32. I think
# we may need to consider the semantics of the type of a RowPartition
# if preferred_dtype is unspecified. Also, looking at convert_to_tensor:
# dtype specifies the type of the output.
# preferred_dtype/dtype_hint is a suggestion, and dtype_hint is the new
# name.
nrows = constant_op.constant(3, dtype=dtypes.int32)
nvals = constant_op.constant(12, dtype=dtypes.int64)
row_length = constant_op.constant(4, dtype=dtypes.int64)
rp = RowPartition.from_uniform_row_length(row_length, nvals=nvals,
nrows=nrows, dtype=dtypes.int64)
self.assertEqual(rp.nrows().dtype, dtypes.int64)
def testFromUniformRowLengthNvalDynamic(self):
# A key question: if nrows and uniform_row_length are known, and nvals is
# given but not known statically, should we determine nvals?
# TODO(martinz): Uncomment after nvals is fixed.
# @def_function.function(
# input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
# def foo(nvals):
# rp = RowPartition.from_uniform_row_length(12, nvals=nvals, nrows=3)
# nval_output = tensor_util.constant_value(rp.nvals())
# self.assertEqual(nval_output, 36)
# foo(constant_op.constant(36, dtype=dtypes.int32))
pass
def testFromUniformRowLengthNvalDynamicNoValidate(self):
# A key question: if nrows and uniform_row_length are known, and nvals is
# given but not known statically, should we determine nvals?
# TODO(martinz): Uncomment after nvals is fixed.
# @def_function.function(
# input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
# def foo(nvals):
# rp = RowPartition.from_uniform_row_length(12, nvals=nvals, nrows=3,
# validate=False)
# nval_output = tensor_util.constant_value(rp.nvals())
# self.assertEqual(nval_output, 36)
# foo(constant_op.constant(36, dtype=dtypes.int32))
pass
def testFromUniformRowLengthNvalDynamicWrong(self):
# A key question: if nrows and uniform_row_length are known, and nvals is
# given but not known statically and WRONG, what should we do? We add a
# check, but checks are only applied to row_splits.
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
def foo(nvals):
rp = RowPartition.from_uniform_row_length(12, nvals=nvals, nrows=3)
return rp.nvals()
with self.assertRaises(errors.InvalidArgumentError):
nvals = foo(constant_op.constant(7, dtype=dtypes.int32))
self.evaluate(nvals)
def testFromUniformRowLengthNvalDynamicWrongRowSplits(self):
# A key question: if nrows and uniform_row_length are known, and nvals is
# given but not known statically and WRONG, what should we do? We add a
# check, but checks are only applied to row_splits.
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
def foo(nvals):
rp = RowPartition.from_uniform_row_length(12, nvals=nvals, nrows=3)
return rp.row_splits()
with self.assertRaises(errors.InvalidArgumentError):
rs = foo(constant_op.constant(7, dtype=dtypes.int32))
self.evaluate(rs)
def testFromUniformRowPartitionNrows(self):
rp = RowPartition.from_uniform_row_length(3, nrows=4)
self.assertAllEqual(4, rp.nrows())
self.assertAllEqual(3, rp.uniform_row_length())
self.assertAllEqual(12, rp.static_nvals)
def testFromUniformRowPartitionNvalsStatic(self):
rp = RowPartition.from_uniform_row_length(3, nvals=12)
self.assertAllEqual(4, rp.static_nrows)
self.assertAllEqual(3, rp.static_uniform_row_length)
self.assertAllEqual(12, rp.static_nvals)
def testFromUniformRowPartitionNvalsStaticNoValidate(self):
rp = RowPartition.from_uniform_row_length(3, nrows=4, nvals=12,
validate=False)
self.assertAllEqual(4, rp.static_nrows)
self.assertAllEqual(3, rp.static_uniform_row_length)
self.assertAllEqual(12, rp.static_nvals)
def testFromUniformRowPartitionNvalsIs(self):
# TODO(martinz): Uncomment after nvals is fixed.
# nvals = constant_op.constant(12)
# rp = RowPartition.from_uniform_row_length(3, nvals=nvals)
# self.assertIs(rp.nvals(), nvals)
pass
def testFromUniformRowPartitionRowStartsStatic(self):
rp = RowPartition.from_row_starts([0, 3, 6], nvals=12)
self.assertAllEqual(12, rp.static_nvals)
def testStaticNrows(self):
rp = RowPartition.from_row_splits([0, 3, 4, 5])
static_nrows = rp.static_nrows
self.assertIsInstance(static_nrows, int)
self.assertAllEqual(3, static_nrows)
def testStaticNrowsUnknown(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
def foo(rs):
rp = RowPartition.from_row_splits(rs)
static_nrows = rp.static_nrows
self.assertIsNone(static_nrows)
foo(array_ops.constant([0, 3, 4, 5], dtype=dtypes.int32))
@test_util.run_all_in_graph_and_eager_modes
|
RowPartitionTest
|
python
|
getsentry__sentry
|
src/sentry/workflow_engine/processors/delayed_workflow.py
|
{
"start": 4812,
"end": 8180
}
|
class ____:
"""
Immutable container for all data from Redis.
Any lookups or summaries or other processing that can be purely derived
from the data should be done on this object so that it's obvious where we're operating
based on parameter data.
"""
events: Mapping[EventKey, EventInstance]
@classmethod
def from_redis_data(
cls, redis_data: dict[str, str], *, continue_on_error: bool
) -> EventRedisData:
events = {}
for key, value in redis_data.items():
try:
event_key = EventKey.from_redis_key(key)
event_instance = EventInstance.parse_raw(value)
events[event_key] = event_instance
except Exception as e:
logger.exception(
"Failed to parse workflow event data",
extra={"key": key, "value": value, "error": str(e)},
)
if not continue_on_error:
raise ValueError(f"Failed to parse Redis data: {str(e)}") from e
return cls(events=events)
@cached_property
def dcg_ids(self) -> set[DataConditionGroupId]:
return {id for key in self.events for id in key.dcg_ids}
@cached_property
def dcg_to_groups(self) -> Mapping[DataConditionGroupId, set[GroupId]]:
dcg_to_groups: dict[DataConditionGroupId, set[GroupId]] = defaultdict(set)
for key in self.events:
for dcg_id in key.dcg_ids:
dcg_to_groups[dcg_id].add(key.group_id)
return dcg_to_groups
@cached_property
def dcg_to_workflow(self) -> dict[DataConditionGroupId, WorkflowId]:
"""Get mapping of DCG IDs to workflow IDs, combining both trigger and action filter groups."""
dcg_to_workflow = {}
for key in self.events:
for dcg_id in key.dcg_ids:
dcg_to_workflow[dcg_id] = key.workflow_id
return dcg_to_workflow
@cached_property
def workflow_ids(self) -> set[WorkflowId]:
return {key.workflow_id for key in self.events}
@cached_property
def event_ids(self) -> set[str]:
return {instance.event_id for instance in self.events.values() if instance.event_id}
@cached_property
def occurrence_ids(self) -> set[str]:
return {
instance.occurrence_id for instance in self.events.values() if instance.occurrence_id
}
@cached_property
def group_ids(self) -> set[GroupId]:
return {key.group_id for key in self.events}
@cached_property
def dcg_to_timestamp(self) -> dict[int, datetime | None]:
"""
A DCG can be recorded with an event for later processing multiple times.
We need to pick a time to use when processing them in bulk, so to bias for recency we associate each DCG with the latest timestamp.
"""
result: dict[int, datetime | None] = defaultdict(lambda: None)
for key, instance in self.events.items():
timestamp = instance.timestamp
for dcg_id in key.dcg_ids:
existing_timestamp = result[dcg_id]
if timestamp is None:
continue
elif existing_timestamp is None or timestamp > existing_timestamp:
result[dcg_id] = timestamp
return result
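# Illustrative sketch (hypothetical values): if DCG 5 is recorded with an event
# at t1 and again at t2 > t1, dcg_to_timestamp maps 5 -> t2; a DCG only seen on
# events without timestamps keeps the default None.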
@dataclass
|
EventRedisData
|
python
|
pytorch__pytorch
|
torch/_lazy/extract_compiled_graph.py
|
{
"start": 470,
"end": 1853
}
|
class ____:
"""
The GraphInputMatcher class sets up the graph inputs for future calls after lazy tracing.
Specifically, those graph inputs corresponding to method parameters should be replaced with the
arguments for the current call.
tensor_id_to_arg_idx maps the tensor id to the parameter index.
graph_input_tensor_ids, graph_input_ivalues list the tensor_id and ivalue for each of the
TS/XLA graph inputs.
"""
tensor_id_to_arg_idx: dict[int, int]
graph_input_tensor_ids: list[int]
# there are 2 categories of graph_input_tensors.
# Category 1: those whose ids are not found in tensor_id_to_arg_idx. These are
# most likely const tensors and we can get their content from graph_input_tensors.
# Category 2: those whose ids are found in tensor_id_to_arg_idx. We should get
# the tensor from the method arguments.
graph_input_ivalues: list[Any]
# get the real graph input tensors
def __call__(self, args):
real_input = []
for tensor_id, traced_ivalue in zip(
self.graph_input_tensor_ids, self.graph_input_ivalues
):
arg_idx = self.tensor_id_to_arg_idx.get(tensor_id, None)
if arg_idx is None:
inp = traced_ivalue
else:
inp = args[arg_idx]
real_input.append(inp)
return real_input
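# Illustrative sketch (hypothetical values): with tensor_id_to_arg_idx = {7: 0},
# graph_input_tensor_ids = [7, 9] and graph_input_ivalues = [None, const_t],
# matcher(args) returns [args[0], const_t]: tensor 7 (a method parameter) is
# replaced by the current call's argument, while tensor 9 (category 1, likely
# a const tensor) keeps its traced ivalue.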
|
GraphInputMatcher
|
python
|
zostera__django-bootstrap4
|
tests/test_paginator.py
|
{
"start": 163,
"end": 1817
}
|
class ____(TestCase):
def test_url_replace_param(self):
self.assertEqual(url_replace_param("/foo/bar?baz=foo", "baz", "yohoo"), "/foo/bar?baz=yohoo")
self.assertEqual(url_replace_param("/foo/bar?baz=foo", "baz", None), "/foo/bar")
self.assertEqual(url_replace_param("/foo/bar#id", "baz", "foo"), "/foo/bar?baz=foo#id")
def bootstrap_pagination(self, page, extra=""):
"""Render bootstrap_pagination tag."""
template = """
{% load bootstrap4 %}
{% bootstrap_pagination page {extra} %}
""".replace("{extra}", extra)
return render_template(template, {"page": page})
def test_paginator(self):
objects = ["john", "paul", "george", "ringo"]
p = Paginator(objects, 2)
res = self.bootstrap_pagination(p.page(2), extra='url="/projects/?foo=bar"')
# order in dicts is not guaranteed in some python versions,
# so we have to check both options
self.assertTrue("/projects/?foo=bar&page=1" in res or "/projects/?page=1&foo=bar" in res)
self.assertTrue("/projects/?foo=bar&page=3" not in res and "/projects/?page=3&foo=bar" not in res)
res = self.bootstrap_pagination(p.page(2), extra='url="/projects/#id"')
self.assertTrue("/projects/?page=1#id" in res)
res = self.bootstrap_pagination(p.page(2), extra='url="/projects/?page=3#id"')
self.assertTrue("/projects/?page=1#id" in res)
res = self.bootstrap_pagination(p.page(2), extra='url="/projects/?page=3" extra="id=20"')
self.assertTrue("/projects/?page=1&id=20" in res or "/projects/?id=20&page=1" in res)
|
PaginatorTest
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 153091,
"end": 156916
}
|
class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"branch_protection_rule_id",
"pattern",
"requires_approving_reviews",
"required_approving_review_count",
"requires_commit_signatures",
"requires_linear_history",
"blocks_creations",
"allows_force_pushes",
"allows_deletions",
"is_admin_enforced",
"requires_status_checks",
"requires_strict_status_checks",
"requires_code_owner_reviews",
"dismisses_stale_reviews",
"restricts_review_dismissals",
"review_dismissal_actor_ids",
"bypass_pull_request_actor_ids",
"bypass_force_push_actor_ids",
"restricts_pushes",
"push_actor_ids",
"required_status_check_contexts",
"required_status_checks",
"requires_conversation_resolution",
"client_mutation_id",
)
branch_protection_rule_id = sgqlc.types.Field(
sgqlc.types.non_null(ID), graphql_name="branchProtectionRuleId"
)
pattern = sgqlc.types.Field(String, graphql_name="pattern")
requires_approving_reviews = sgqlc.types.Field(
Boolean, graphql_name="requiresApprovingReviews"
)
required_approving_review_count = sgqlc.types.Field(
Int, graphql_name="requiredApprovingReviewCount"
)
requires_commit_signatures = sgqlc.types.Field(
Boolean, graphql_name="requiresCommitSignatures"
)
requires_linear_history = sgqlc.types.Field(
Boolean, graphql_name="requiresLinearHistory"
)
blocks_creations = sgqlc.types.Field(Boolean, graphql_name="blocksCreations")
allows_force_pushes = sgqlc.types.Field(Boolean, graphql_name="allowsForcePushes")
allows_deletions = sgqlc.types.Field(Boolean, graphql_name="allowsDeletions")
is_admin_enforced = sgqlc.types.Field(Boolean, graphql_name="isAdminEnforced")
requires_status_checks = sgqlc.types.Field(
Boolean, graphql_name="requiresStatusChecks"
)
requires_strict_status_checks = sgqlc.types.Field(
Boolean, graphql_name="requiresStrictStatusChecks"
)
requires_code_owner_reviews = sgqlc.types.Field(
Boolean, graphql_name="requiresCodeOwnerReviews"
)
dismisses_stale_reviews = sgqlc.types.Field(
Boolean, graphql_name="dismissesStaleReviews"
)
restricts_review_dismissals = sgqlc.types.Field(
Boolean, graphql_name="restrictsReviewDismissals"
)
review_dismissal_actor_ids = sgqlc.types.Field(
sgqlc.types.list_of(sgqlc.types.non_null(ID)),
graphql_name="reviewDismissalActorIds",
)
bypass_pull_request_actor_ids = sgqlc.types.Field(
sgqlc.types.list_of(sgqlc.types.non_null(ID)),
graphql_name="bypassPullRequestActorIds",
)
bypass_force_push_actor_ids = sgqlc.types.Field(
sgqlc.types.list_of(sgqlc.types.non_null(ID)),
graphql_name="bypassForcePushActorIds",
)
restricts_pushes = sgqlc.types.Field(Boolean, graphql_name="restrictsPushes")
push_actor_ids = sgqlc.types.Field(
sgqlc.types.list_of(sgqlc.types.non_null(ID)), graphql_name="pushActorIds"
)
required_status_check_contexts = sgqlc.types.Field(
sgqlc.types.list_of(sgqlc.types.non_null(String)),
graphql_name="requiredStatusCheckContexts",
)
required_status_checks = sgqlc.types.Field(
sgqlc.types.list_of(sgqlc.types.non_null(RequiredStatusCheckInput)),
graphql_name="requiredStatusChecks",
)
requires_conversation_resolution = sgqlc.types.Field(
Boolean, graphql_name="requiresConversationResolution"
)
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
|
UpdateBranchProtectionRuleInput
|
python
|
patrick-kidger__equinox
|
equinox/internal/_primitive.py
|
{
"start": 2235,
"end": 16418
}
|
class ____:
__slots__ = ("treedef_out", "static_out", "__weakref__")
def called(self):
return hasattr(self, "treedef_out")
def get(self):
return self.treedef_out, self.static_out
def __call__(self, out, like=_like_sentinel):
if like is _like_sentinel:
dynamic_out, static_out = partition(out, _is_array_like_internal)
flat_out, treedef_out = jtu.tree_flatten(dynamic_out)
try:
treedef_out_old = self.treedef_out
static_out_old = self.static_out
except AttributeError:
self.treedef_out = treedef_out
self.static_out = static_out
else:
assert treedef_out_old == treedef_out
assert tree_equal(static_out_old, static_out)
return flat_out
else:
assert jtu.tree_structure(out, is_leaf=_is_none) == jtu.tree_structure(
like, is_leaf=_is_none
)
spec = jtu.tree_map(_make_spec, out, like, is_leaf=_is_none)
dynamic_out, static_out = partition(out, spec, is_leaf=_is_none)
flat_out, treedef_out = jtu.tree_flatten(dynamic_out)
try:
treedef_out_old = self.treedef_out
static_out_old = self.static_out
except AttributeError:
self.treedef_out = treedef_out
self.static_out = static_out
else:
assert treedef_out_old == treedef_out
assert tree_equal(static_out_old, static_out)
like = jtu.tree_map(_replace_none, like, is_leaf=_is_none)
like = jtu.tree_map(_get_second, dynamic_out, like, is_leaf=_is_none)
flat_like, treedef_like = jtu.tree_flatten(like)
flat_like = [None if x is _dummy_none else x for x in flat_like]
assert treedef_like == treedef_out
assert len(flat_out) == len(flat_like)
return flat_out, flat_like
def filter_primitive_def(rule):
"""For wrapping def_impl and def_abstract_eval.
These can now take arbitrary inputs and outputs.
"""
def _wrapper(*dynamic, treedef, static, flatten):
args = jtu.tree_unflatten(treedef, _combine(dynamic, static))
out = rule(*args)
return flatten(out)
return _wrapper
def filter_primitive_jvp(rule):
"""
The input tangents (to the wrapped rule) will be a PyTree with the same
structure as the input primals. `None` indicates symbolic zero tangents,
in particular for non-JAX-array-likes.
The output tangents are expected to match the output primals, necessarily
with `None` for all non-JAX-array-likes.
"""
def _wrapper(primals, tangents, *, treedef, static, flatten):
tangents = [None if type(t) is ad.Zero else t for t in tangents]
tangents_static = [x if x is _missing_dynamic else None for x in static]
primals = jtu.tree_unflatten(treedef, _combine(primals, static))
tangents = jtu.tree_unflatten(treedef, _combine(tangents, tangents_static))
primals_out, tangents_out = rule(primals, tangents)
flat_primals_out, flat_tangents_out = flatten(primals_out, tangents_out)
flat_tangents_out = [
_zero_from_primal(p) if t is None else t
for p, t in zip(flat_primals_out, flat_tangents_out)
]
return flat_primals_out, flat_tangents_out
return _wrapper
_sentinel: Any = object()
def filter_primitive_transpose(rule=_sentinel, *, materialise_zeros=False):
"""
The `inputs` to the transpose rule are a PyTree like the primal
inputs, with `UndefinedPrimal`s where appropriate.
The `cts_out` passed to the transpose rule are a PyTree like the
primal output, with `None` for symbolic zero cotangents, in particular
for non-JAX-array-likes.
The output from the rule should be a PyTree like the primal input.
All leaves which were non-JAX-array-like, or which should have zero
cotangent, should have cotangent `None`.
"""
if rule is _sentinel:
return ft.partial(
filter_primitive_transpose, materialise_zeros=materialise_zeros
)
def _wrapper(cts_out, *dynamic, treedef, static, flatten):
treedef_out, _ = flatten.get()
if materialise_zeros:
cts_out = [ad.instantiate_zeros(ct) for ct in cts_out]
else:
cts_out = [None if type(ct) is ad.Zero else ct for ct in cts_out]
cts_out = jtu.tree_unflatten(treedef_out, cts_out)
wrapped_dynamic = [_wrap_undefined(x) for x in dynamic]
wrapped_flat = _combine(wrapped_dynamic, static)
wrapped_inputs = jtu.tree_unflatten(treedef, wrapped_flat)
inputs = jtu.tree_map(_unwrap_undefined, wrapped_inputs)
cts = rule(inputs, cts_out)
flat_inputs, flat_cts = Flatten()(wrapped_inputs, cts)
flat_inputs = [_unwrap_undefined(p, aval=True) for p in flat_inputs]
flat_cts = [
_zero_from_primal(p) if ct is None else ct
for p, ct in zip(flat_inputs, flat_cts)
]
assert len(dynamic) == len(flat_cts)
return flat_cts
return _wrapper
def filter_primitive_batching(rule):
"""
The input batch axes (to the wrapped rule) will be a PyTree with the same
structure as the input primals, with `None` for all non-JAX-arrays.
The output batch axes are expected to match the output primals, with `None`
for all non-JAX-arrays.
"""
def _wrapper(dynamic, batch_axes, *, treedef, static, flatten):
flat = _combine(dynamic, static)
inputs = jtu.tree_unflatten(treedef, flat)
batch_axes = [None if b is batching.not_mapped else b for b in batch_axes]
batch_axes_static = [x if x is _missing_dynamic else None for x in static]
batch_axes = _combine(batch_axes, batch_axes_static)
batch_axes = jtu.tree_unflatten(treedef, batch_axes)
out, batch_axes = rule(inputs, batch_axes)
flat_out, flat_batch_axes = flatten(out, batch_axes)
flat_batch_axes = [batching.not_mapped if b is None else b for b in flat_batch_axes]
return flat_out, flat_batch_axes
return _wrapper
def filter_primitive_bind(prim: jax.extend.core.Primitive, *args) -> PyTree:
"""Calls a primitive that has had its rules defined using the filter
functions above.
"""
assert prim.multiple_results
# If `args` contains a Jaxpr or ClosedJaxpr in its leaves, then it ends up as a
# member of the `static` tuple. This is important to ensure that jaxpr-rewriting
# passes are able to find it.
# (E.g. if `eqx.filter_closure_convert(...)` is an argument and we apply
# `jax.core.jaxprs_in_params`.)
flat, treedef = jtu.tree_flatten(args)
dynamic = [x for x in flat if is_array(x)]
static = tuple(_missing_dynamic if is_array(x) else x for x in flat)
flatten = Flatten()
flat_out = prim.bind(*dynamic, treedef=treedef, static=static, flatten=flatten)
treedef_out, static_out = flatten.get()
return combine(jtu.tree_unflatten(treedef_out, flat_out), static_out)
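# Calling-convention sketch (hypothetical primitive `my_prim`, assumed to have
# had its rules registered via the filter_* wrappers above): arbitrary PyTrees
# go in, JAX arrays are routed through `dynamic`, everything else travels in
# `static`, and the output PyTree is reassembled via `flatten`:
#
#   out = filter_primitive_bind(my_prim, {"x": jnp.ones(3), "flag": True})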
# Useful helper for JVP rules of higher-order primitives.
def materialise_zeros(primal, tangent, allow_struct=False):
arraylike = is_array_like(primal)
if allow_struct:
arraylike = arraylike or isinstance(primal, jax.ShapeDtypeStruct)
if tangent is None and arraylike:
tangent = _zero_from_primal(primal)
return ad.instantiate_zeros(tangent)
else:
return tangent
#
# vprim
# -----
# This allows for creating a primitive without needing to specify its batching rule.
# This is instead automatically obtained by vmap'ing its other rules (assuming that
# its rules are implemented using JAX operations).
#
_vprim_impl_registry = {}
_vprim_abstract_eval_registry = {}
_vprim_jvp_registry = {}
_vprim_transpose_registry = {}
def create_vprim(name: str, impl, abstract_eval, jvp, transpose):
prim = jax.extend.core.Primitive(name)
prim.multiple_results = True
def batch_rule(axis_size, axis_name, trace_type, inputs, batch_axes, **params):
del trace_type
if all(b is None for b in jtu.tree_leaves(batch_axes)):
out = prim.bind(*inputs, **params)
batch_axes_out = jtu.tree_map(lambda _: None, out)
else:
# delegates batching to `_vprim_p`
out = _vprim_p.bind(
*inputs,
prim=prim,
__axis_size=axis_size,
__axis_name=axis_name,
__batch_axes=batch_axes,
params=tuple(params.items()),
)
batch_axes_out = jtu.tree_map(lambda _: 0, out)
return out, batch_axes_out
prim.def_impl(impl)
prim.def_abstract_eval(abstract_eval)
ad.primitive_jvps[prim] = jvp
ad.primitive_transposes[prim] = transpose
batching.axis_primitive_batchers[prim] = batch_rule
mlir.register_lowering(prim, mlir.lower_fun(impl, multiple_results=True))
_vprim_impl_registry[prim] = impl
_vprim_abstract_eval_registry[prim] = abstract_eval
_vprim_jvp_registry[prim] = jvp
_vprim_transpose_registry[prim] = transpose
return prim
def _vprim_impl(*inputs, prim, __axis_size, __axis_name, __batch_axes, params):
impl = ft.partial(_vprim_impl_registry[prim], **dict(params))
impl = jax.vmap(
impl, in_axes=__batch_axes, axis_size=__axis_size, axis_name=__axis_name
)
return impl(*inputs)
if jax.__version_info__ >= (0, 5, 1):
def _unmapped_aval(axis_size, axis_name, axis, aval):
del axis_name
return jax.core.unmapped_aval(axis_size, axis, aval) # pyright: ignore[reportCallIssue]
else:
# signature (axis_size, axis_name, axis, aval)
_unmapped_aval = jax.core.unmapped_aval # pyright: ignore[reportAssignmentType]
def _vprim_abstract_eval(*inputs, prim, __axis_size, __axis_name, __batch_axes, params):
assert len(inputs) == len(__batch_axes)
inputs = [
jax.core.mapped_aval(__axis_size, b, x) for x, b in zip(inputs, __batch_axes)
]
abstract_eval = _vprim_abstract_eval_registry[prim]
outs = abstract_eval(*inputs, **dict(params))
outs = [_unmapped_aval(__axis_size, __axis_name, 0, x) for x in outs]
return outs
def _resolve_zeros_t(tangent, batch_axis):
if type(tangent) is ad.Zero and isinstance(batch_axis, int):
aval = tangent.aval
# Also accepts ConcreteArrays
if not isinstance(aval, jax.core.ShapedArray):
raise NotImplementedError(
"vprim only currently supports ShapedArrays for symbolic zeros"
)
shape = aval.shape[:batch_axis] + aval.shape[batch_axis + 1 :]
return ad.Zero(jax.core.ShapedArray(shape, aval.dtype))
else:
return tangent
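# e.g. a symbolic-zero tangent with aval shape (4, 3) and batch_axis 0 is
# narrowed to an ad.Zero of shape (3,), matching one unbatched element.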
def _resolve_zeros_b(tangent, batch_axis):
if type(tangent) is ad.Zero:
return None
else:
return batch_axis
def _vprim_jvp(
primals, tangents, *, prim, __axis_size, __axis_name, __batch_axes, params
):
assert len(primals) == len(__batch_axes)
assert len(tangents) == len(__batch_axes)
tangents = [_resolve_zeros_t(t, b) for t, b in zip(tangents, __batch_axes)]
batch_axes_t = [_resolve_zeros_b(t, b) for t, b in zip(tangents, __batch_axes)]
jvp = ft.partial(_vprim_jvp_registry[prim], **dict(params))
jvp = jax.vmap(
jvp,
in_axes=(__batch_axes, batch_axes_t),
axis_size=__axis_size,
axis_name=__axis_name,
)
return jvp(primals, tangents)
def _resolve_undefined_i(input, batch_axis):
if type(input) is ad.UndefinedPrimal and isinstance(batch_axis, int):
aval = input.aval
# Also accepts ConcreteArrays
if not isinstance(aval, jax.core.ShapedArray):
raise NotImplementedError(
"vprim only currently supports ShapedArrays for undefined primals"
)
shape = aval.shape[:batch_axis] + aval.shape[batch_axis + 1 :]
return ad.UndefinedPrimal(jax.core.ShapedArray(shape, aval.dtype))
else:
return input
def _resolve_undefined_b(input, batch_axis):
if type(input) is ad.UndefinedPrimal:
return None
else:
return batch_axis
def _vprim_transpose(
cts, *inputs, prim, __axis_size, __axis_name, __batch_axes, params
):
mapped_inputs = [_resolve_undefined_i(i, b) for i, b in zip(inputs, __batch_axes)]
batch_axes = [_resolve_undefined_b(i, b) for i, b in zip(inputs, __batch_axes)]
def _transpose(*_inputs):
_outputs = _vprim_transpose_registry[prim](*_inputs, **dict(params))
# `Zero` is not a JAX type -- it's an internal AD thing -- so we shouldn't pass
# it across the `vmap` boundary. In particular JAX won't apply the out batch
# axis to it.
# JAX allows for returning `None` to indicate no cotangent, so we use that
# instead, which is compatible with both `vmap` and `out_axes`.
return tuple(None if type(o) is ad.Zero else o for o in _outputs)
transpose = jax.vmap(
_transpose,
in_axes=(0, *batch_axes),
out_axes=__batch_axes,
axis_size=__axis_size,
axis_name=__axis_name,
)
if prim.multiple_results:
cts = tuple(None if type(c) is ad.Zero else c for c in cts)
else:
cts = None if type(cts) is ad.Zero else cts
outputs = transpose(cts, *mapped_inputs)
assert len(inputs) == len(outputs)
for i, o in zip(inputs, outputs):
if o is not None:
# Can't have cotangents on defined variables I think? The point of an
# `UndefinedPrimal` is to declare what you want cotangents with respect to.
assert type(i) is ad.UndefinedPrimal
# We've filtered out all other avals above, with a `NotImplementedError` if
# required.
assert isinstance(i.aval, jax.core.ShapedArray)
assert i.aval.shape == jnp.shape(o)
return outputs
# _vprim_p is itself a vprim!
_vprim_p = create_vprim(
"vprim", _vprim_impl, _vprim_abstract_eval, _vprim_jvp, _vprim_transpose
)
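# Note: since `_vprim_p` is itself created through `create_vprim`, a nested
# `vmap` of a vprim should recurse through `_vprim_p`'s own batch rule, so
# multiple batch dimensions are handled by stacked `jax.vmap`s.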
|
Flatten
|
python
|
chroma-core__chroma
|
chromadb/api/types.py
|
{
"start": 59700,
"end": 59795
}
|
class ____:
int_inverted_index: Optional[IntInvertedIndexType] = None
@dataclass
|
IntValueType
|
python
|
tensorflow__tensorflow
|
tensorflow/python/training/monitored_session_test.py
|
{
"start": 10368,
"end": 11188
}
|
class ____(session_run_hook.SessionRunHook):
def __init__(self):
self.should_stop = False
self.request = None
self.call_counter = collections.Counter()
self.last_run_context = None
self.last_run_values = None
def begin(self):
self.call_counter['begin'] += 1
def after_create_session(self, session, coord): # pylint: disable=unused-argument
self.call_counter['after_create_session'] += 1
def before_run(self, run_context):
self.call_counter['before_run'] += 1
self.last_run_context = run_context
return self.request
def after_run(self, run_context, run_values):
self.call_counter['after_run'] += 1
self.last_run_values = run_values
if self.should_stop:
run_context.request_stop()
def end(self, session):
self.call_counter['end'] += 1
|
FakeHook
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/partition_keys.py
|
{
"start": 769,
"end": 960
}
|
class ____(graphene.Union):
class Meta:
types = (GraphenePartitionKeys, GraphenePartitionSubsetDeserializationError)
name = "PartitionKeysOrError"
|
GraphenePartitionKeysOrError
|
python
|
realpython__materials
|
python-built-in-functions/point.py
|
{
"start": 14,
"end": 297
}
|
class ____:
def __init__(self, x, y):
self.x = x
self.y = y
@classmethod
def from_polar(cls, distance, angle):
return cls(
x=distance * math.cos(math.radians(angle)),
y=distance * math.sin(math.radians(angle)),
)
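# Usage sketch: Point.from_polar(2, 0) gives x=2.0, y=0.0, and
# Point.from_polar(1, 90) gives x~0.0, y=1.0 (up to floating-point error).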
|
Point
|
python
|
huggingface__transformers
|
tests/models/roberta/test_modeling_roberta.py
|
{
"start": 14069,
"end": 26142
}
|
class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaModel,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": RobertaModel,
"fill-mask": RobertaForMaskedLM,
"question-answering": RobertaForQuestionAnswering,
"text-classification": RobertaForSequenceClassification,
"text-generation": RobertaForCausalLM,
"token-classification": RobertaForTokenClassification,
"zero-shot": RobertaForSequenceClassification,
}
if is_torch_available()
else {}
)
model_split_percents = [0.5, 0.8, 0.9]
# Overwriting to add `is_decoder` flag
def prepare_config_and_inputs_for_generate(self, batch_size=2):
config, inputs = super().prepare_config_and_inputs_for_generate(batch_size)
config.is_decoder = True
return config, inputs
def setUp(self):
self.model_tester = RobertaModelTester(self)
self.config_tester = ConfigTester(self, config_class=RobertaConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_as_decoder(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
def test_model_as_decoder_with_default_input_mask(self):
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
input_mask = None
self.model_tester.create_and_check_model_as_decoder(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def test_for_causal_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model_name = "FacebookAI/roberta-base"
model = RobertaModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_create_position_ids_respects_padding_index(self):
"""This is a regression test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is RobertaEmbeddings.padding_idx + 1
"""
config = self.model_tester.prepare_config_and_inputs()[0]
model = RobertaEmbeddings(config=config)
input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
expected_positions = torch.as_tensor(
[[0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx]]
)
position_ids = RobertaEmbeddings.create_position_ids_from_input_ids(input_ids, model.padding_idx)
self.assertEqual(position_ids.shape, expected_positions.shape)
self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
def test_create_position_ids_from_inputs_embeds(self):
"""This is a regression test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is RobertaEmbeddings.padding_idx + 1
"""
config = self.model_tester.prepare_config_and_inputs()[0]
embeddings = RobertaEmbeddings(config=config)
inputs_embeds = torch.empty(2, 4, 30)
expected_single_positions = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds, embeddings.padding_idx)
self.assertEqual(position_ids.shape, expected_positions.shape)
self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
def attention_mask_padding_matches_padding_free_with_position_ids(
self, attn_implementation: str, fa_kwargs: bool = False
):
"""
Overwritten to account for the embeddings that rely on position ids.
"""
if not self.has_attentions:
self.skipTest(reason="Model architecture does not support attentions")
max_new_tokens = 30
support_flag = {
"sdpa": "_supports_sdpa",
"flash_attention_2": "_supports_flash_attn",
"flash_attention_3": "_supports_flash_attn",
}
for model_class in self.all_generative_model_classes:
if attn_implementation != "eager" and not getattr(model_class, support_flag[attn_implementation]):
self.skipTest(f"{model_class.__name__} does not support {attn_implementation}")
# can't infer if the new attn mask API is supported, so assume that only models with an attention backend support it
if not model_class._supports_attention_backend:
self.skipTest(f"{model_class.__name__} does not support new attention mask API")
if model_class._is_stateful: # non-transformer models most probably have no packing support
self.skipTest(f"{model_class.__name__} doesn't support packing!")
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
if config.is_encoder_decoder:
self.skipTest("Model is an encoder-decoder")
if 0 not in inputs_dict.get("attention_mask", []) or "attention_mask" not in inputs_dict:
self.skipTest("Model dummy inputs should contain padding in their attention mask")
if "input_ids" not in inputs_dict or inputs_dict["input_ids"].ndim != 2:
self.skipTest("Model dummy inputs should contain text input ids")
# make sure that all models have enough positions for generation
dummy_input_ids = inputs_dict["input_ids"]
if hasattr(config, "max_position_embeddings"):
config.max_position_embeddings = max_new_tokens + dummy_input_ids.shape[1] + 1
model = model_class(config)
if "position_ids" not in inspect.signature(model.forward).parameters:
self.skipTest("Model does not support position_ids")
if (not fa_kwargs) and "position_ids" not in inspect.signature(model.forward).parameters:
continue # this model doesn't accept position ids as input
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
# Drop all keys except for the minimal set. Hard to manipulate with multimodals etc
inputs_dict = {k: v for k, v in inputs_dict.items() if k in ["input_ids", "attention_mask"]}
# Ensure left padding, to adapt for some models
if 0 in inputs_dict["attention_mask"][:, -1]:
inputs_dict["attention_mask"] = inputs_dict["attention_mask"].flip(1)
dummy_attention_mask = inputs_dict["attention_mask"]
dummy_input_ids[~dummy_attention_mask.bool()] = config.get_text_config().pad_token_id
# Main difference to other models: we need to prepare position ids according to the attention mask,
# as we use them to extract embeddings that rely on the correct position. Naively increasing
# sequences no longer suffice at this point; the solution below computes an increasing sequence
# over the 1s in the mask and puts 0s elsewhere.
inputs_dict["position_ids"] = ((inputs_dict["attention_mask"] == 1).long().cumsum(dim=1) - 1) * (
inputs_dict["attention_mask"] == 1
).long()
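# Worked example: attention_mask [[0, 0, 1, 1, 1]] gives cumsum - 1 of
# [[-1, -1, 0, 1, 2]], which multiplied by the mask yields position_ids
# [[0, 0, 0, 1, 2]] -- increasing over the 1s, 0 on the padding.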
model = (
model_class.from_pretrained(
tmpdirname,
dtype=torch.bfloat16,
attn_implementation=attn_implementation,
)
.to(torch_device)
.eval()
)
if fa_kwargs:
# flatten
features = [
{"input_ids": i[a.bool()].tolist()} for i, a in zip(dummy_input_ids, dummy_attention_mask)
]
# add position_ids + fa_kwargs
data_collator = DataCollatorWithFlattening(return_tensors="pt", return_flash_attn_kwargs=True)
batch = data_collator(features)
padfree_inputs_dict = {
k: t.to(torch_device) if torch.is_tensor(t) else t for k, t in batch.items()
}
else:
# create packed position_ids
position_ids = (
torch.cat([torch.arange(length) for length in dummy_attention_mask.sum(1).tolist()])
.long()
.unsqueeze(0)
.to(torch_device)
)
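# Worked example: per-sequence lengths [3, 2] give packed position_ids
# [[0, 1, 2, 0, 1]] -- each sequence restarts its own arange.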
padfree_inputs_dict = {
"input_ids": dummy_input_ids[dummy_attention_mask.bool()].unsqueeze(0),
"position_ids": position_ids,
}
# We need to do simple forward without cache in order to trigger packed SDPA/flex/eager attention path
res_padded = model(**inputs_dict, use_cache=False)
res_padfree = model(**padfree_inputs_dict, use_cache=False)
logits_padded = res_padded.logits[dummy_attention_mask.bool()]
logits_padfree = res_padfree.logits[0]
# acceptable numerical instability
tol = torch.finfo(torch.bfloat16).eps
torch.testing.assert_close(logits_padded, logits_padfree, rtol=tol, atol=tol)
@require_torch
|
RobertaModelTest
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/destination-ragie/destination_ragie/client.py
|
{
"start": 2690,
"end": 20133
}
|
class ____:
# --- Constants ---
DEFAULT_API_URL = "https://api.ragie.ai"
# Endpoint for JSON data uploads (assuming this is correct, based on previous impl)
DOCUMENTS_RAW_ENDPOINT = "/documents/raw"
# Endpoint for Listing/Deleting/Querying (based on previous impl and connection check)
DOCUMENTS_GENERAL_ENDPOINT = "/documents"
METADATA_AIRBYTE_STREAM_FIELD = "airbyte_stream" # Use this key in RagieWriter as well
def __init__(self, config: RagieConfig):
self.config = config
self.base_url = config.api_url.rstrip("/") if config.api_url else self.DEFAULT_API_URL
self.api_key = config.api_key
self.session = self._create_session()
# Store partition header for reuse on *non-file-upload* requests
self.partition_header = {"partition": config.partition} if config.partition else {}
logger.info(f"RagieClient initialized. Base URL: {self.base_url}, Default Partition Scope: {config.partition or 'Account-wide'}")
def _create_session(self) -> requests.Session:
session = requests.Session()
session.headers.update(
{"Authorization": f"Bearer {self.api_key}", "Accept": "application/json", "X-source": "airbyte-destination-ragie"}
)
return session
@backoff.on_exception(
backoff.expo,
(requests.exceptions.RequestException, RagieApiError),
max_tries=5,
giveup=user_error,
on_backoff=lambda details: logger.warning(
f"Transient error detected ({details['exception']}). Retrying in {details['wait']:.1f}s."
),
factor=3,
)
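# With factor=3 and expo backoff, retry delays grow roughly as 3, 6, 12, 24
# seconds across the 5 tries (subject to backoff's default jitter), while
# `user_error` short-circuits retries for non-transient failures.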
def _request(
self,
method: str,
endpoint: str,
params: Optional[Dict[str, Any]] = None,
json_data: Optional[Dict[str, Any]] = None,
data: Optional[Dict[str, Any]] = None, # For form data
files: Optional[Dict[str, Any]] = None, # For file uploads
extra_headers: Optional[Dict[str, str]] = None,
) -> requests.Response:
"""Makes an HTTP request with error handling, retries, and optional extra headers."""
full_url = f"{self.base_url}{endpoint}"
# Start with session headers (includes Auth, Accept, X-source)
request_headers = self.session.headers.copy()
# Apply specific per-request headers (like partition for non-file requests)
if extra_headers:
request_headers.update(extra_headers)
# Content-Type management:
# - If 'json_data' is present, set Content-Type to application/json
# - If 'files' is present, requests handles multipart/form-data Content-Type automatically
# - If only 'data' is present, requests handles application/x-www-form-urlencoded
# - Avoid setting Content-Type explicitly if 'files' are involved.
if json_data is not None and files is None:
request_headers["Content-Type"] = "application/json"
elif files is not None:
# Remove potentially conflicting Content-Type if files are present
request_headers.pop("Content-Type", None)
log_json = f" json_data: {json.dumps(json_data)[:200]}..." if json_data else ""
log_data = f" data_keys: {list(data.keys())}" if data else ""
log_files = f" files_keys: {list(files.keys())}" if files else ""
log_params = f" params: {params}" if params else ""
# Log effective headers *before* the request
logger.debug(f"Making {method} request to {full_url}{log_params}{log_json}{log_data}{log_files} with headers: {request_headers}")
try:
response = self.session.request(
method=method, url=full_url, params=params, json=json_data, data=data, files=files, headers=request_headers
)
logger.debug(f"Response status code: {response.status_code}")
response.raise_for_status() # Raises HTTPError for 4xx/5xx
return response
except requests.exceptions.HTTPError as e:
error_message = f"HTTP error {e.response.status_code} for {method} {full_url}."
try:
error_details = e.response.json()
error_message += f" Response: {json.dumps(error_details)}"
except json.JSONDecodeError:
error_message += f" Response Body: {e.response.text[:500]}" # Log raw response if not JSON
logger.error(error_message)
raise RagieApiError(error_message) from e
except requests.exceptions.RequestException as e:
# Network errors, timeouts etc.
logger.error(f"Request failed for {method} {full_url}: {e}")
raise RagieApiError(f"Request failed: {e}") from e # Wrap in custom error
def check_connection(self) -> Optional[str]:
"""Checks API key and connectivity using GET /documents."""
logger.info(
f"Performing connection check using GET {self.DOCUMENTS_GENERAL_ENDPOINT} with partition scope: {self.config.partition or 'default'}"
)
try:
# Use the general partition header for this GET request
response = self._request("GET", self.DOCUMENTS_GENERAL_ENDPOINT, params={"page_size": 1}, extra_headers=self.partition_header)
logger.info(f"Connection check successful (Endpoint {self.DOCUMENTS_GENERAL_ENDPOINT} responded with {response.status_code}).")
return None
except RagieApiError as e:
error_str = str(e)
if "401" in error_str:
return "Authentication failed: Invalid API Key."
if "403" in error_str:
return "Authorization failed: API Key lacks permissions for the specified partition or action."
# Check for other 4xx errors based on the status code in the message
status_code = None
if e.__cause__ and isinstance(e.__cause__, requests.exceptions.HTTPError):
status_code = e.__cause__.response.status_code
if status_code and 400 <= status_code < 500 and status_code not in [401, 403]:
return f"Connection check failed with status {status_code}. Check API URL, Partition, and configuration. Error: {error_str}"
# Generic API error or network error
return f"Failed to connect to Ragie API at {self.base_url}. Error: {e}"
except Exception as e:
logger.error(f"Unexpected error during connection check: {repr(e)}", exc_info=True)
return f"An unexpected error occurred during connection check: {repr(e)}"
def index_documents(self, documents: List[Dict[str, Any]]):
"""
Indexes documents one by one.
Uses POST /documents/raw for JSON data uploads (application/json).
"""
if not documents:
return
logger.info(f"Indexing {len(documents)} JSON documents one by one...")
successful_count = 0
for item_payload in documents:
doc_id_log = item_payload.get("external_id") or item_payload.get("name", "N/A")
try:
# --- Handle JSON Data Upload via POST /documents/raw ---
endpoint = self.DOCUMENTS_RAW_ENDPOINT
method = "POST"
# For JSON uploads, we do want the partition header if set
headers = self.partition_header
logger.debug(
f"Indexing JSON document via {endpoint}: Name='{item_payload.get('name', 'N/A')}', ExternalID='{item_payload.get('external_id')}'"
)
# Make the request with json_data and potential partition header
self._request(
method=method,
endpoint=endpoint,
json_data=item_payload, # Send the whole payload as JSON body
extra_headers=headers, # Pass partition header
)
logger.debug(
f"Successfully requested indexing for JSON document: Name='{item_payload.get('name', 'N/A')}', ExternalID='{item_payload.get('external_id')}'"
)
successful_count += 1
except Exception as e:
logger.error(f"Failed to index document '{doc_id_log}': {e}", exc_info=True)
internal_msg = f"PayloadKeys: {list(item_payload.keys())}"
error_details = str(e) if isinstance(e, RagieApiError) else repr(e)
internal_msg += f", Error: {error_details}"
# Determine failure type
failure_type = FailureType.system_error # Default
if isinstance(e, RagieApiError) and e.__cause__:
if isinstance(e.__cause__, requests.exceptions.HTTPError):
status = e.__cause__.response.status_code
if 400 <= status < 500 and status not in [404, 429]:
failure_type = FailureType.config_error # User config likely caused 4xx
raise AirbyteTracedException(
message=f"Failed to index document '{doc_id_log}' into Ragie.",
internal_message=internal_msg[:1000], # Limit length
failure_type=failure_type,
) from e
logger.info(f"Successfully processed {successful_count} indexing requests.")
# --- Metadata Filtering/Querying (_build_filter_json, find_ids_by_metadata, find_docs_by_metadata) ---
def _build_filter_json(self, filter_conditions: Dict[str, Any]) -> Dict[str, Any]:
"""Builds a Ragie filter JSON object structure. (No changes needed)"""
if not filter_conditions:
logger.warning("Attempted to build filter JSON from empty conditions.")
return {}
and_conditions = []
supported_operators = {"$eq", "$ne", "$gt", "$gte", "$lt", "$lte", "$in", "$nin"}
for key, value in filter_conditions.items():
filter_key_path = f"{key}"
if isinstance(value, dict) and value and next(iter(value)) in supported_operators:
condition = {filter_key_path: value}
elif isinstance(value, (str, int, float, bool)):
condition = {filter_key_path: {"$eq": value}}
elif isinstance(value, list):
condition = {filter_key_path: {"$in": value}}
else:
logger.warning(f"Unsupported value type ({type(value)}) for filter key '{key}'. Skipping condition.")
continue
and_conditions.append(condition)
if not and_conditions:
return {}
final_filter_obj = and_conditions[0] if len(and_conditions) == 1 else {"$and": and_conditions}
logger.debug(f"Built filter JSON: {final_filter_obj}")
return final_filter_obj
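# Worked example: {"airbyte_stream": "users", "id": [1, 2]} becomes
# {"$and": [{"airbyte_stream": {"$eq": "users"}}, {"id": {"$in": [1, 2]}}]},
# while a single condition is returned bare, without the "$and" wrapper.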
def find_ids_by_metadata(self, filter_conditions: Dict[str, Any]) -> List[str]:
"""Finds internal Ragie document IDs using GET /documents."""
logger.info(f"Querying Ragie document IDs with filter: {filter_conditions}")
found_internal_ids = []
cursor = None
page_size = 100
filter_json_obj = self._build_filter_json(filter_conditions)
if not filter_json_obj:
return []
filter_param_string = json.dumps(filter_json_obj)
# Use general partition header for GET requests
headers = self.partition_header
while True:
params = {"page_size": page_size, "filter": filter_param_string}
if cursor:
params["cursor"] = cursor
try:
# Use the general endpoint for listing/querying
response = self._request("GET", self.DOCUMENTS_GENERAL_ENDPOINT, params=params, extra_headers=headers)
response_data = response.json()
documents_on_page = response_data.get("documents", [])
if not documents_on_page:
break
page_ids = [doc.get("id") for doc in documents_on_page if doc.get("id")]
found_internal_ids.extend(page_ids)
logger.debug(f"Found {len(page_ids)} IDs page. Total: {len(found_internal_ids)}")
cursor = response_data.get("pagination", {}).get("next_cursor")
if not cursor:
break
except Exception as e:
logger.error(f"Failed during document ID query (filter='{filter_param_string}', cursor='{cursor}'): {e}", exc_info=True)
raise AirbyteTracedException(
message="Failed to query Ragie document IDs.",
internal_message=f"Filter: {filter_param_string}, Error: {e}",
failure_type=FailureType.system_error,
) from e
logger.info(f"Found {len(found_internal_ids)} total document IDs matching filter.")
return found_internal_ids
def find_docs_by_metadata(self, filter_conditions: Dict[str, Any], fields: Optional[List[str]] = None) -> List[Dict[str, Any]]:
"""Finds full documents using GET /documents."""
logger.info(f"Querying Ragie documents with filter: {filter_conditions}" + (f" fields: {fields}" if fields else ""))
found_docs = []
cursor = None
page_size = 100
filter_json_obj = self._build_filter_json(filter_conditions)
if not filter_json_obj:
return []
filter_param_string = json.dumps(filter_json_obj)
# Use general partition header for GET requests
headers = self.partition_header
while True:
params = {"page_size": page_size, "filter": filter_param_string}
if fields:
params["fields"] = ",".join(fields)
if cursor:
params["cursor"] = cursor
try:
# Use the general endpoint for listing/querying
response = self._request("GET", self.DOCUMENTS_GENERAL_ENDPOINT, params=params, extra_headers=headers)
response_data = response.json()
documents_on_page = response_data.get("documents", [])
if not documents_on_page:
break
found_docs.extend(documents_on_page)
logger.debug(f"Found {len(documents_on_page)} docs page. Total: {len(found_docs)}")
cursor = response_data.get("pagination", {}).get("next_cursor")
if not cursor:
break
except Exception as e:
logger.error(f"Failed during document query (filter='{filter_param_string}', cursor='{cursor}'): {e}", exc_info=True)
raise AirbyteTracedException(
message="Failed to query Ragie documents.",
internal_message=f"Filter: {filter_param_string}, Fields: {fields}, Error: {e}",
failure_type=FailureType.system_error,
) from e
logger.info(f"Found {len(found_docs)} total documents matching filter.")
return found_docs
# --- Deletion Logic ---
def delete_documents_by_id(self, internal_ids: List[str]):
"""Deletes documents one by one using DELETE /documents/{internal_id}."""
if not internal_ids:
return
logger.info(f"Attempting to delete {len(internal_ids)} documents by internal Ragie ID.")
successful_deletes = 0
failed_deletes = 0
# Use general partition header for DELETE requests
headers = self.partition_header
for internal_id in internal_ids:
if not internal_id or not isinstance(internal_id, str):
logger.warning(f"Invalid internal ID for deletion: {internal_id}. Skipping.")
failed_deletes += 1
continue
# Construct endpoint using the general documents endpoint base
delete_endpoint = f"{self.DOCUMENTS_GENERAL_ENDPOINT}/{internal_id}"
try:
self._request("DELETE", delete_endpoint, extra_headers=headers)
successful_deletes += 1
logger.debug(f"Successfully deleted document with internal_id: {internal_id}")
except RagieApiError as e:
error_str = str(e)
status_code = None
if e.__cause__ and isinstance(e.__cause__, requests.exceptions.HTTPError):
status_code = e.__cause__.response.status_code
if status_code == 404:
logger.warning(f"Document internal_id {internal_id} not found for deletion (404). Assuming deleted.")
successful_deletes += 1 # Count 404 as success during delete
else:
logger.error(f"Failed to delete document internal_id {internal_id}: {e}")
failed_deletes += 1
except Exception as e:
logger.error(f"Unexpected error deleting document internal_id {internal_id}: {e}", exc_info=True)
failed_deletes += 1
logger.info(f"Deletion result: {successful_deletes} successful (incl 404s), {failed_deletes} failures.")
if failed_deletes > 0:
raise AirbyteTracedException(
message=f"Failed to delete {failed_deletes} out of {len(internal_ids)} documents (excluding 404s).",
failure_type=FailureType.system_error,
)
|
RagieClient
|
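The two query methods in the RagieClient source above share one mechanism worth isolating: cursor-based pagination, where each response carries pagination.next_cursor and the loop ends when the cursor is absent or a page comes back empty. A minimal standalone sketch of that loop, assuming a hypothetical /documents endpoint and bearer-token auth (the endpoint and field names here are illustrative, not a confirmed Ragie API):

import requests

def fetch_all_documents(base_url: str, api_key: str, page_size: int = 100) -> list:
    """Collect every page from a cursor-paginated endpoint (illustrative sketch)."""
    headers = {"Authorization": f"Bearer {api_key}"}
    results = []
    cursor = None
    while True:
        params = {"page_size": page_size}
        if cursor:
            params["cursor"] = cursor
        resp = requests.get(f"{base_url}/documents", params=params, headers=headers)
        resp.raise_for_status()
        data = resp.json()
        page = data.get("documents", [])
        if not page:
            break  # empty page: nothing left to fetch
        results.extend(page)
        cursor = data.get("pagination", {}).get("next_cursor")
        if not cursor:
            break  # no next_cursor means this was the last page
    return results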
python
|
python__mypy
|
mypy/join.py
|
{
"start": 10295,
"end": 38962
}
|
class ____(TypeVisitor[ProperType]):
"""Implementation of the least upper bound algorithm.
Attributes:
s: The other (left) type operand.
"""
def __init__(self, s: ProperType, instance_joiner: InstanceJoiner | None = None) -> None:
self.s = s
self.instance_joiner = instance_joiner
def visit_unbound_type(self, t: UnboundType) -> ProperType:
return AnyType(TypeOfAny.special_form)
def visit_union_type(self, t: UnionType) -> ProperType:
if is_proper_subtype(self.s, t):
return t
else:
return mypy.typeops.make_simplified_union([self.s, t])
def visit_any(self, t: AnyType) -> ProperType:
return t
def visit_none_type(self, t: NoneType) -> ProperType:
if state.strict_optional:
if isinstance(self.s, (NoneType, UninhabitedType)):
return t
elif isinstance(self.s, (UnboundType, AnyType)):
return AnyType(TypeOfAny.special_form)
else:
return mypy.typeops.make_simplified_union([self.s, t])
else:
return self.s
def visit_uninhabited_type(self, t: UninhabitedType) -> ProperType:
return self.s
def visit_deleted_type(self, t: DeletedType) -> ProperType:
return self.s
def visit_erased_type(self, t: ErasedType) -> ProperType:
return self.s
def visit_type_var(self, t: TypeVarType) -> ProperType:
if isinstance(self.s, TypeVarType) and self.s.id == t.id:
if self.s.upper_bound == t.upper_bound:
return self.s
return self.s.copy_modified(upper_bound=join_types(self.s.upper_bound, t.upper_bound))
else:
return self.default(self.s)
def visit_param_spec(self, t: ParamSpecType) -> ProperType:
if self.s == t:
return t
return self.default(self.s)
def visit_type_var_tuple(self, t: TypeVarTupleType) -> ProperType:
if self.s == t:
return t
if isinstance(self.s, Instance) and is_subtype(t.upper_bound, self.s):
# TODO: should we do this more generally and for all TypeVarLikeTypes?
return self.s
return self.default(self.s)
def visit_unpack_type(self, t: UnpackType) -> UnpackType:
raise NotImplementedError
def visit_parameters(self, t: Parameters) -> ProperType:
if isinstance(self.s, Parameters):
if not is_similar_params(t, self.s):
# TODO: it would be prudent to return [*object, **object] instead of Any.
return self.default(self.s)
from mypy.meet import meet_types
return t.copy_modified(
arg_types=[
meet_types(s_a, t_a) for s_a, t_a in zip(self.s.arg_types, t.arg_types)
],
arg_names=combine_arg_names(self.s, t),
)
else:
return self.default(self.s)
def visit_instance(self, t: Instance) -> ProperType:
if isinstance(self.s, Instance):
if self.instance_joiner is None:
self.instance_joiner = InstanceJoiner()
nominal = self.instance_joiner.join_instances(t, self.s)
structural: Instance | None = None
if t.type.is_protocol and is_protocol_implementation(self.s, t):
structural = t
elif self.s.type.is_protocol and is_protocol_implementation(t, self.s):
structural = self.s
# Structural join is preferred in the case where we have found both
# structural and nominal and they have same MRO length (see two comments
# in join_instances_via_supertype). Otherwise, just return the nominal join.
if not structural or is_better(nominal, structural):
return nominal
return structural
elif isinstance(self.s, FunctionLike):
if t.type.is_protocol:
call = unpack_callback_protocol(t)
if call:
return join_types(call, self.s)
return join_types(t, self.s.fallback)
elif isinstance(self.s, TypeType):
return join_types(t, self.s)
elif isinstance(self.s, TypedDictType):
return join_types(t, self.s)
elif isinstance(self.s, TupleType):
return join_types(t, self.s)
elif isinstance(self.s, LiteralType):
return join_types(t, self.s)
elif isinstance(self.s, TypeVarTupleType) and is_subtype(self.s.upper_bound, t):
return t
else:
return self.default(self.s)
def visit_callable_type(self, t: CallableType) -> ProperType:
if isinstance(self.s, CallableType) and is_similar_callables(t, self.s):
if is_equivalent(t, self.s):
return combine_similar_callables(t, self.s)
result = join_similar_callables(t, self.s)
# We set the from_type_type flag to suppress an error when a collection of
# concrete class objects gets inferred as their common abstract superclass.
if not (
(t.is_type_obj() and t.type_object().is_abstract)
or (self.s.is_type_obj() and self.s.type_object().is_abstract)
):
result.from_type_type = True
if any(
isinstance(tp, (NoneType, UninhabitedType))
for tp in get_proper_types(result.arg_types)
):
# We don't want to return unusable Callable, attempt fallback instead.
return join_types(t.fallback, self.s)
return result
elif isinstance(self.s, Overloaded):
# Switch the order of arguments so that we'll get to visit_overloaded.
return join_types(t, self.s)
elif isinstance(self.s, Instance) and self.s.type.is_protocol:
call = unpack_callback_protocol(self.s)
if call:
return join_types(t, call)
return join_types(t.fallback, self.s)
def visit_overloaded(self, t: Overloaded) -> ProperType:
# This is more complex than most other cases. Here are some
# examples that illustrate how this works.
#
# First let's define a concise notation:
# - Cn are callable types (for n in 1, 2, ...)
# - Ov(C1, C2, ...) is an overloaded type with items C1, C2, ...
# - Callable[[T, ...], S] is written as [T, ...] -> S.
#
# We want some basic properties to hold (assume Cn are all
# unrelated via Any-similarity):
#
# join(Ov(C1, C2), C1) == C1
# join(Ov(C1, C2), Ov(C1, C2)) == Ov(C1, C2)
# join(Ov(C1, C2), Ov(C1, C3)) == C1
# join(Ov(C1, C2), C3) == join of fallback types
#
# The presence of Any types makes things more interesting. The join is the
# most general type we can get with respect to Any:
#
# join(Ov([int] -> int, [str] -> str), [Any] -> str) == Any -> str
#
# We could use a simplification step that removes redundancies, but that's not
# implemented right now. Consider this example, where we get a redundancy:
#
# join(Ov([int, Any] -> Any, [str, Any] -> Any), [Any, int] -> Any) ==
# Ov([Any, int] -> Any, [Any, int] -> Any)
#
# TODO: Consider more cases of callable subtyping.
result: list[CallableType] = []
s = self.s
if isinstance(s, FunctionLike):
# The interesting case where both types are function types.
for t_item in t.items:
for s_item in s.items:
if is_similar_callables(t_item, s_item):
if is_equivalent(t_item, s_item):
result.append(combine_similar_callables(t_item, s_item))
elif is_subtype(t_item, s_item):
result.append(s_item)
if result:
# TODO: Simplify redundancies from the result.
if len(result) == 1:
return result[0]
else:
return Overloaded(result)
return join_types(t.fallback, s.fallback)
elif isinstance(s, Instance) and s.type.is_protocol:
call = unpack_callback_protocol(s)
if call:
return join_types(t, call)
return join_types(t.fallback, s)
def join_tuples(self, s: TupleType, t: TupleType) -> list[Type] | None:
"""Join two tuple types while handling variadic entries.
This is surprisingly tricky, and we don't handle some difficult corner cases.
Most of the trickiness comes from the variadic tuple items like *tuple[X, ...]
since they can have arbitrary partial overlaps (while *Ts can't be split).
"""
s_unpack_index = find_unpack_in_list(s.items)
t_unpack_index = find_unpack_in_list(t.items)
if s_unpack_index is None and t_unpack_index is None:
if s.length() == t.length():
items: list[Type] = []
for i in range(t.length()):
items.append(join_types(t.items[i], s.items[i]))
return items
return None
if s_unpack_index is not None and t_unpack_index is not None:
# The most complex case: both tuples have an unpack item.
s_unpack = s.items[s_unpack_index]
assert isinstance(s_unpack, UnpackType)
s_unpacked = get_proper_type(s_unpack.type)
t_unpack = t.items[t_unpack_index]
assert isinstance(t_unpack, UnpackType)
t_unpacked = get_proper_type(t_unpack.type)
if s.length() == t.length() and s_unpack_index == t_unpack_index:
# We can handle a case where arity is perfectly aligned, e.g.
# join(Tuple[X1, *tuple[Y1, ...], Z1], Tuple[X2, *tuple[Y2, ...], Z2]).
# We can essentially perform the join elementwise.
prefix_len = t_unpack_index
suffix_len = t.length() - t_unpack_index - 1
items = []
for si, ti in zip(s.items[:prefix_len], t.items[:prefix_len]):
items.append(join_types(si, ti))
joined = join_types(s_unpacked, t_unpacked)
if isinstance(joined, TypeVarTupleType):
items.append(UnpackType(joined))
elif isinstance(joined, Instance) and joined.type.fullname == "builtins.tuple":
items.append(UnpackType(joined))
else:
if isinstance(t_unpacked, Instance):
assert t_unpacked.type.fullname == "builtins.tuple"
tuple_instance = t_unpacked
else:
assert isinstance(t_unpacked, TypeVarTupleType)
tuple_instance = t_unpacked.tuple_fallback
items.append(
UnpackType(
tuple_instance.copy_modified(
args=[object_from_instance(tuple_instance)]
)
)
)
if suffix_len:
for si, ti in zip(s.items[-suffix_len:], t.items[-suffix_len:]):
items.append(join_types(si, ti))
return items
if s.length() == 1 or t.length() == 1:
# Another case we can handle is when one of the tuples is purely variadic
# (i.e. a non-normalized form of tuple[X, ...]); in this case the join
# will again be purely variadic.
if not (isinstance(s_unpacked, Instance) and isinstance(t_unpacked, Instance)):
return None
assert s_unpacked.type.fullname == "builtins.tuple"
assert t_unpacked.type.fullname == "builtins.tuple"
mid_joined = join_types(s_unpacked.args[0], t_unpacked.args[0])
t_other = [a for i, a in enumerate(t.items) if i != t_unpack_index]
s_other = [a for i, a in enumerate(s.items) if i != s_unpack_index]
other_joined = join_type_list(s_other + t_other)
mid_joined = join_types(mid_joined, other_joined)
return [UnpackType(s_unpacked.copy_modified(args=[mid_joined]))]
# TODO: are there other cases we can handle (e.g. both prefix/suffix are shorter)?
return None
if s_unpack_index is not None:
variadic = s
unpack_index = s_unpack_index
fixed = t
else:
assert t_unpack_index is not None
variadic = t
unpack_index = t_unpack_index
fixed = s
# Case where one tuple has a variadic item and the other one doesn't. The join will
# be variadic, since the fixed tuple is a subtype of the variadic one, but not vice versa.
unpack = variadic.items[unpack_index]
assert isinstance(unpack, UnpackType)
unpacked = get_proper_type(unpack.type)
if not isinstance(unpacked, Instance):
return None
if fixed.length() < variadic.length() - 1:
# There are no non-trivial types that are supertype of both.
return None
prefix_len = unpack_index
suffix_len = variadic.length() - prefix_len - 1
prefix, middle, suffix = split_with_prefix_and_suffix(
tuple(fixed.items), prefix_len, suffix_len
)
items = []
for fi, vi in zip(prefix, variadic.items[:prefix_len]):
items.append(join_types(fi, vi))
mid_joined = join_type_list(list(middle))
mid_joined = join_types(mid_joined, unpacked.args[0])
items.append(UnpackType(unpacked.copy_modified(args=[mid_joined])))
if suffix_len:
for fi, vi in zip(suffix, variadic.items[-suffix_len:]):
items.append(join_types(fi, vi))
return items
def visit_tuple_type(self, t: TupleType) -> ProperType:
# When given two fixed-length tuples:
# * If they have the same length, join their subtypes item-wise:
# Tuple[int, bool] + Tuple[bool, bool] becomes Tuple[int, bool]
# * If lengths do not match, return a variadic tuple:
# Tuple[bool, int] + Tuple[bool] becomes Tuple[int, ...]
#
# Otherwise, `t` is a fixed-length tuple but `self.s` is NOT:
# * Joining with a variadic tuple returns variadic tuple:
# Tuple[int, bool] + Tuple[bool, ...] becomes Tuple[int, ...]
# * Joining with any Sequence also returns a Sequence:
# Tuple[int, bool] + List[bool] becomes Sequence[int]
if isinstance(self.s, TupleType):
if self.instance_joiner is None:
self.instance_joiner = InstanceJoiner()
fallback = self.instance_joiner.join_instances(
mypy.typeops.tuple_fallback(self.s), mypy.typeops.tuple_fallback(t)
)
assert isinstance(fallback, Instance)
items = self.join_tuples(self.s, t)
if items is not None:
if len(items) == 1 and isinstance(item := items[0], UnpackType):
if isinstance(unpacked := get_proper_type(item.type), Instance):
# Avoid double-wrapping tuple[*tuple[X, ...]]
return unpacked
return TupleType(items, fallback)
else:
# TODO: should this be a default fallback behaviour like for meet?
if is_proper_subtype(self.s, t):
return t
if is_proper_subtype(t, self.s):
return self.s
return fallback
else:
return join_types(self.s, mypy.typeops.tuple_fallback(t))
def visit_typeddict_type(self, t: TypedDictType) -> ProperType:
if isinstance(self.s, TypedDictType):
items = {
item_name: s_item_type
for (item_name, s_item_type, t_item_type) in self.s.zip(t)
if (
is_equivalent(s_item_type, t_item_type)
and (item_name in t.required_keys) == (item_name in self.s.required_keys)
)
}
fallback = self.s.create_anonymous_fallback()
all_keys = set(items.keys())
# We need to filter by items.keys() since some required keys present in both t and
# self.s might be missing from the join if the types are incompatible.
required_keys = all_keys & t.required_keys & self.s.required_keys
# If one type has a key as readonly, we mark it as readonly for both:
readonly_keys = (t.readonly_keys | self.s.readonly_keys) & all_keys
return TypedDictType(items, required_keys, readonly_keys, fallback)
elif isinstance(self.s, Instance):
return join_types(self.s, t.fallback)
else:
return self.default(self.s)
def visit_literal_type(self, t: LiteralType) -> ProperType:
if isinstance(self.s, LiteralType):
if t == self.s:
return t
if self.s.fallback.type.is_enum and t.fallback.type.is_enum:
return mypy.typeops.make_simplified_union([self.s, t])
return join_types(self.s.fallback, t.fallback)
elif isinstance(self.s, Instance) and self.s.last_known_value == t:
return t
else:
return join_types(self.s, t.fallback)
def visit_partial_type(self, t: PartialType) -> ProperType:
# We only have partial information so we can't decide the join result. We should
# never get here.
assert False, "Internal error"
def visit_type_type(self, t: TypeType) -> ProperType:
if isinstance(self.s, TypeType):
return TypeType.make_normalized(
join_types(t.item, self.s.item),
line=t.line,
is_type_form=self.s.is_type_form or t.is_type_form,
)
elif isinstance(self.s, Instance) and self.s.type.fullname == "builtins.type":
return self.s
else:
return self.default(self.s)
def visit_type_alias_type(self, t: TypeAliasType) -> ProperType:
assert False, f"This should be never called, got {t}"
def default(self, typ: Type) -> ProperType:
typ = get_proper_type(typ)
if isinstance(typ, Instance):
return object_from_instance(typ)
elif isinstance(typ, TypeType):
return self.default(typ.item)
elif isinstance(typ, UnboundType):
return AnyType(TypeOfAny.special_form)
elif isinstance(typ, TupleType):
return self.default(mypy.typeops.tuple_fallback(typ))
elif isinstance(typ, TypedDictType):
return self.default(typ.fallback)
elif isinstance(typ, FunctionLike):
return self.default(typ.fallback)
elif isinstance(typ, TypeVarType):
return self.default(typ.upper_bound)
elif isinstance(typ, ParamSpecType):
return self.default(typ.upper_bound)
else:
return AnyType(TypeOfAny.special_form)
def is_better(t: Type, s: Type) -> bool:
# Given two possible results from join_instances_via_supertype(),
# indicate whether t is the better one.
t = get_proper_type(t)
s = get_proper_type(s)
if isinstance(t, Instance):
if not isinstance(s, Instance):
return True
if t.type.is_protocol != s.type.is_protocol:
if t.type.fullname != "builtins.object" and s.type.fullname != "builtins.object":
# mro of protocol is not really relevant
return not t.type.is_protocol
# Use len(mro) as a proxy for the better choice.
if len(t.type.mro) > len(s.type.mro):
return True
return False
def normalize_callables(s: ProperType, t: ProperType) -> tuple[ProperType, ProperType]:
if isinstance(s, (CallableType, Overloaded)):
s = s.with_unpacked_kwargs()
if isinstance(t, (CallableType, Overloaded)):
t = t.with_unpacked_kwargs()
return s, t
def is_similar_callables(t: CallableType, s: CallableType) -> bool:
"""Return True if t and s have identical numbers of
arguments, default arguments and varargs.
"""
return (
len(t.arg_types) == len(s.arg_types)
and t.min_args == s.min_args
and t.is_var_arg == s.is_var_arg
)
def is_similar_params(t: Parameters, s: Parameters) -> bool:
# This matches the logic in is_similar_callables() above.
return (
len(t.arg_types) == len(s.arg_types)
and t.min_args == s.min_args
and (t.var_arg() is not None) == (s.var_arg() is not None)
)
def update_callable_ids(c: CallableType, ids: list[TypeVarId]) -> CallableType:
tv_map = {}
tvs = []
for tv, new_id in zip(c.variables, ids):
new_tv = tv.copy_modified(id=new_id)
tvs.append(new_tv)
tv_map[tv.id] = new_tv
return expand_type(c, tv_map).copy_modified(variables=tvs)
def match_generic_callables(t: CallableType, s: CallableType) -> tuple[CallableType, CallableType]:
# When we combine/join/meet similar callables, the situation where both are generic
# requires special care. A more principled solution may involve unify_generic_callable(),
# but it would have two problems:
# * This adds risk of infinite recursion: e.g. join -> unification -> solver -> join
# * Using unification is an incorrect thing for meets, as it "widens" the types
# Finally, this effectively falls back to an old behaviour before namespaces were added to
# type variables, and it worked relatively well.
max_len = max(len(t.variables), len(s.variables))
min_len = min(len(t.variables), len(s.variables))
if min_len == 0:
return t, s
new_ids = [TypeVarId.new(meta_level=0) for _ in range(max_len)]
# Note: this relies on variables being in order they appear in function definition.
return update_callable_ids(t, new_ids), update_callable_ids(s, new_ids)
def join_similar_callables(t: CallableType, s: CallableType) -> CallableType:
t, s = match_generic_callables(t, s)
arg_types: list[Type] = []
for i in range(len(t.arg_types)):
arg_types.append(safe_meet(t.arg_types[i], s.arg_types[i]))
# TODO in combine_similar_callables also applies here (names and kinds; user metaclasses)
# The fallback type can be either 'function', 'type', or some user-provided metaclass.
# The result should always use 'function' as a fallback if either operands are using it.
if t.fallback.type.fullname == "builtins.function":
fallback = t.fallback
else:
fallback = s.fallback
return t.copy_modified(
arg_types=arg_types,
arg_names=combine_arg_names(t, s),
ret_type=join_types(t.ret_type, s.ret_type),
fallback=fallback,
name=None,
)
def safe_join(t: Type, s: Type) -> Type:
# This is a temporary solution to prevent crashes in combine_similar_callables() etc.,
# until relevant TODOs on handling arg_kinds will be addressed there.
if not isinstance(t, UnpackType) and not isinstance(s, UnpackType):
return join_types(t, s)
if isinstance(t, UnpackType) and isinstance(s, UnpackType):
return UnpackType(join_types(t.type, s.type))
return object_or_any_from_type(get_proper_type(t))
def safe_meet(t: Type, s: Type) -> Type:
# Similar to above but for meet_types().
from mypy.meet import meet_types
if not isinstance(t, UnpackType) and not isinstance(s, UnpackType):
return meet_types(t, s)
if isinstance(t, UnpackType) and isinstance(s, UnpackType):
unpacked = get_proper_type(t.type)
if isinstance(unpacked, TypeVarTupleType):
fallback_type = unpacked.tuple_fallback.type
elif isinstance(unpacked, TupleType):
fallback_type = unpacked.partial_fallback.type
else:
assert isinstance(unpacked, Instance) and unpacked.type.fullname == "builtins.tuple"
fallback_type = unpacked.type
res = meet_types(t.type, s.type)
if isinstance(res, UninhabitedType):
res = Instance(fallback_type, [res])
return UnpackType(res)
return UninhabitedType()
def combine_similar_callables(t: CallableType, s: CallableType) -> CallableType:
t, s = match_generic_callables(t, s)
arg_types: list[Type] = []
for i in range(len(t.arg_types)):
arg_types.append(safe_join(t.arg_types[i], s.arg_types[i]))
# TODO kinds and argument names
# TODO what should happen if one fallback is 'type' and the other is a user-provided metaclass?
# The fallback type can be either 'function', 'type', or some user-provided metaclass.
# The result should always use 'function' as a fallback if either operands are using it.
if t.fallback.type.fullname == "builtins.function":
fallback = t.fallback
else:
fallback = s.fallback
return t.copy_modified(
arg_types=arg_types,
arg_names=combine_arg_names(t, s),
ret_type=join_types(t.ret_type, s.ret_type),
fallback=fallback,
name=None,
)
def combine_arg_names(
t: CallableType | Parameters, s: CallableType | Parameters
) -> list[str | None]:
"""Produces a list of argument names compatible with both callables.
For example, suppose 't' and 's' have the following signatures:
- t: (a: int, b: str, X: str) -> None
- s: (a: int, b: str, Y: str) -> None
This function would return ["a", "b", None]. This information
is then used above to compute the join of t and s, which results
in a signature of (a: int, b: str, str) -> None.
Note that the third argument's name is omitted and 't' and 's'
are both valid subtypes of this inferred signature.
Precondition: is_similar_callables(t, s) (or is_similar_params(t, s)) is true.
"""
num_args = len(t.arg_types)
new_names = []
for i in range(num_args):
t_name = t.arg_names[i]
s_name = s.arg_names[i]
if t_name == s_name or t.arg_kinds[i].is_named() or s.arg_kinds[i].is_named():
new_names.append(t_name)
else:
new_names.append(None)
return new_names
def object_from_instance(instance: Instance) -> Instance:
"""Construct the type 'builtins.object' from an instance type."""
# Use the fact that 'object' is always the last class in the mro.
res = Instance(instance.type.mro[-1], [])
return res
def object_or_any_from_type(typ: ProperType) -> ProperType:
# Similar to object_from_instance() but tries hard for all types.
# TODO: find a better way to get object, or make this more reliable.
if isinstance(typ, Instance):
return object_from_instance(typ)
elif isinstance(typ, (CallableType, TypedDictType, LiteralType)):
return object_from_instance(typ.fallback)
elif isinstance(typ, TupleType):
return object_from_instance(typ.partial_fallback)
elif isinstance(typ, TypeType):
return object_or_any_from_type(typ.item)
elif isinstance(typ, TypeVarLikeType) and isinstance(typ.upper_bound, ProperType):
return object_or_any_from_type(typ.upper_bound)
elif isinstance(typ, UnionType):
for item in typ.items:
if isinstance(item, ProperType):
candidate = object_or_any_from_type(item)
if isinstance(candidate, Instance):
return candidate
elif isinstance(typ, UnpackType):
return object_or_any_from_type(get_proper_type(typ.type))
return AnyType(TypeOfAny.implementation_artifact)
def join_type_list(types: Sequence[Type]) -> Type:
if not types:
# This is a little arbitrary but reasonable. Any empty tuple should be compatible
# with all variable length tuples, and this makes it possible.
return UninhabitedType()
joined = types[0]
for t in types[1:]:
joined = join_types(joined, t)
return joined
def unpack_callback_protocol(t: Instance) -> ProperType | None:
assert t.type.is_protocol
if t.type.protocol_members == ["__call__"]:
return get_proper_type(find_member("__call__", t, t, is_operator=True))
return None
|
TypeJoinVisitor
|
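To make the nominal join in visit_instance above concrete: for plain (non-protocol) classes, a least upper bound can be approximated by walking one type's MRO until a superclass of the other is found. A toy sketch using Python's runtime MRO (a conceptual illustration only, not mypy's actual join_instances implementation):

def toy_join(a: type, b: type) -> type:
    """Return the first class in a's MRO that is also a superclass of b."""
    for base in a.__mro__:
        if issubclass(b, base):
            return base
    return object  # unreachable: object terminates every MRO

class Animal: ...
class Dog(Animal): ...
class Cat(Animal): ...

assert toy_join(Dog, Cat) is Animal
assert toy_join(Dog, Dog) is Dog
assert toy_join(Dog, int) is object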
python
|
tiangolo__fastapi
|
tests/test_duplicate_models_openapi.py
|
{
"start": 155,
"end": 195
}
|
class ____(BaseModel):
a: Model
|
Model2
|
python
|
pypa__pipenv
|
pipenv/patched/pip/_vendor/distlib/locators.py
|
{
"start": 1679,
"end": 2945
}
|
class ____(BaseRedirectHandler):
"""
A class to work around a bug in some Python 3.2.x releases.
"""
# There's a bug in the base version for some 3.2.x
# (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
# returns e.g. /abc, it bails because it says the scheme ''
# is bogus, when actually it should use the request's
# URL for the scheme. See Python issue #13696.
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
# (so probably same goes for URI). Use first header.
newurl = None
for key in ('location', 'uri'):
if key in headers:
newurl = headers[key]
break
if newurl is None: # pragma: no cover
return
urlparts = urlparse(newurl)
if urlparts.scheme == '':
newurl = urljoin(req.get_full_url(), newurl)
if hasattr(headers, 'replace_header'):
headers.replace_header(key, newurl)
else:
headers[key] = newurl
return BaseRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
http_error_301 = http_error_303 = http_error_307 = http_error_302
|
RedirectHandler
|
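The workaround in the RedirectHandler above reduces to resolving a scheme-less Location header against the original request URL. That resolution step can be checked in isolation with urljoin (a standalone sketch of the same logic, outside the handler):

from urllib.parse import urljoin, urlparse

def resolve_location(request_url: str, location: str) -> str:
    """Resolve a possibly relative redirect target against the request URL."""
    if urlparse(location).scheme == "":
        return urljoin(request_url, location)
    return location

assert resolve_location("https://example.com/pkg/", "/abc") == "https://example.com/abc"
assert resolve_location("https://example.com/pkg/", "https://mirror.org/x") == "https://mirror.org/x"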
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-shared/dagster_shared/libraries/__init__.py
|
{
"start": 209,
"end": 3586
}
|
class ____:
_libraries: dict[str, str] = {"dagster-shared": __version__}
@classmethod
def register(cls, name: str, version: str, *, is_dagster_package: bool = True):
if is_dagster_package:
check_dagster_package_version(name, version)
cls._libraries[name] = version
@classmethod
def get(cls) -> Mapping[str, str]:
return cls._libraries.copy()
def parse_package_version(version_str: str) -> Version:
parsed_version = Version(version_str)
assert isinstance(parsed_version, Version)
return parsed_version
def increment_micro_version(v: Version, interval: int) -> Version:
major, minor, micro = v.major, v.minor, v.micro
new_micro = micro + interval
if new_micro < 0:
raise ValueError(f"Micro version cannot be negative: {new_micro}")
return Version(f"{major}.{minor}.{new_micro}")
def check_dagster_package_version(library_name: str, library_version: str) -> None:
# This import must be internal in order for this function to be testable
from dagster_shared.version import __version__
parsed_lib_version = parse_package_version(library_version)
if parsed_lib_version.release[0] >= 1:
if library_version != __version__:
message = (
f"Found version mismatch between `dagster-shared` ({__version__})"
f"and `{library_name}` ({library_version})"
)
warnings.warn(message)
else:
target_version = library_version_from_core_version(__version__)
if library_version != target_version:
message = (
f"Found version mismatch between `dagster-shared` ({__version__}) "
f"expected library version ({target_version}) "
f"and `{library_name}` ({library_version})."
)
warnings.warn(message)
# Use this to get the "library version" (pre-1.0 version) from the "core version" (post 1.0
# version). 16 is from the 0.16.0 that library versions stayed on when core went to 1.0.0.
def library_version_from_core_version(core_version: str) -> str:
parsed_version = parse_package_version(core_version)
release = parsed_version.release
if release[0] >= 1:
library_version = ".".join(["0", str(16 + release[1]), str(release[2])])
if parsed_version.is_prerelease:
library_version = library_version + "".join(
[str(pre) for pre in check.not_none(parsed_version.pre)]
)
if parsed_version.is_postrelease:
library_version = library_version + "post" + str(parsed_version.post)
return library_version
else:
return core_version
def core_version_from_library_version(library_version: str) -> str:
parsed_version = parse_package_version(library_version)
release = parsed_version.release
if release[0] < 1 and len(release) > 1:
core_version = ".".join(["1", str(release[1] - 16), str(release[2])])
if parsed_version.is_prerelease:
core_version = core_version + "".join(
[str(pre) for pre in check.not_none(parsed_version.pre)]
)
if parsed_version.is_postrelease:
core_version = core_version + "post" + str(parsed_version.post)
return core_version
else:
return library_version
|
DagsterLibraryRegistry
|
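A worked example of the mapping in library_version_from_core_version and its inverse: the minor component shifts by 16 across the 1.0 boundary, so core 1.3.2 maps to library 0.19.2 and back. A standalone sketch of the forward direction (plain releases only; the real helper also carries pre/post release segments):

# Core 1.x.y <-> library 0.(16 + x).y; pre/post segments carry over verbatim.
def core_to_library(core: str) -> str:
    major, minor, micro = core.split(".")
    return f"0.{16 + int(minor)}.{micro}" if int(major) >= 1 else core

assert core_to_library("1.3.2") == "0.19.2"
assert core_to_library("0.15.8") == "0.15.8"  # pre-1.0 versions pass through unchanged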
python
|
mlflow__mlflow
|
tests/tracking/test_mlflow_artifacts.py
|
{
"start": 1027,
"end": 15475
}
|
class ____(NamedTuple):
backend_store_uri: str
default_artifact_root: str
artifacts_destination: str
url: str
process: subprocess.Popen
@pytest.fixture(scope="module")
def artifacts_server():
with tempfile.TemporaryDirectory() as tmpdir:
port = get_safe_port()
backend_store_uri = f"sqlite:///{os.path.join(tmpdir, 'mlruns.db')}"
artifacts_destination = os.path.join(tmpdir, "mlartifacts")
url = f"http://{LOCALHOST}:{port}"
default_artifact_root = f"{url}/api/2.0/mlflow-artifacts/artifacts"
process = _launch_server(
LOCALHOST,
port,
backend_store_uri,
default_artifact_root,
("file:///" + artifacts_destination if is_windows() else artifacts_destination),
)
yield ArtifactsServer(
backend_store_uri, default_artifact_root, artifacts_destination, url, process
)
process.kill()
def read_file(path):
with open(path) as f:
return f.read()
def upload_file(path, url, headers=None):
with open(path, "rb") as f:
requests.put(url, data=f, headers=headers).raise_for_status()
def download_file(url, local_path, headers=None):
with requests.get(url, stream=True, headers=headers) as r:
r.raise_for_status()
assert r.headers["X-Content-Type-Options"] == "nosniff"
assert "Content-Type" in r.headers
assert "Content-Disposition" in r.headers
with open(local_path, "wb") as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
return r
def test_mlflow_artifacts_rest_apis(artifacts_server, tmp_path):
default_artifact_root = artifacts_server.default_artifact_root
artifacts_destination = artifacts_server.artifacts_destination
# Upload artifacts
file_a = tmp_path.joinpath("a.txt")
file_a.write_text("0")
upload_file(file_a, f"{default_artifact_root}/a.txt")
assert os.path.exists(os.path.join(artifacts_destination, "a.txt"))
assert read_file(os.path.join(artifacts_destination, "a.txt")) == "0"
file_b = tmp_path.joinpath("b.txt")
file_b.write_text("1")
upload_file(file_b, f"{default_artifact_root}/dir/b.txt")
assert os.path.exists(os.path.join(artifacts_destination, "dir", "b.txt"))
assert read_file(os.path.join(artifacts_destination, "dir", "b.txt")) == "1"
# Download artifacts
local_dir = tmp_path.joinpath("folder")
local_dir.mkdir()
local_path_a = local_dir.joinpath("a.txt")
download_file(f"{default_artifact_root}/a.txt", local_path_a)
assert read_file(local_path_a) == "0"
local_path_b = local_dir.joinpath("b.txt")
download_file(f"{default_artifact_root}/dir/b.txt", local_path_b)
assert read_file(local_path_b) == "1"
# List artifacts
resp = requests.get(default_artifact_root)
assert resp.json() == {
"files": [
{"path": "a.txt", "is_dir": False, "file_size": 1},
{"path": "dir", "is_dir": True},
]
}
resp = requests.get(default_artifact_root, params={"path": "dir"})
assert resp.json() == {"files": [{"path": "b.txt", "is_dir": False, "file_size": 1}]}
def test_log_artifact(artifacts_server, tmp_path):
url = artifacts_server.url
artifacts_destination = artifacts_server.artifacts_destination
mlflow.set_tracking_uri(url)
tmp_path = tmp_path.joinpath("a.txt")
tmp_path.write_text("0")
with mlflow.start_run() as run:
mlflow.log_artifact(tmp_path)
experiment_id = "0"
run_artifact_root = os.path.join(
artifacts_destination, experiment_id, run.info.run_id, "artifacts"
)
dest_path = os.path.join(run_artifact_root, tmp_path.name)
assert os.path.exists(dest_path)
assert read_file(dest_path) == "0"
with mlflow.start_run() as run:
mlflow.log_artifact(tmp_path, artifact_path="artifact_path")
run_artifact_root = os.path.join(
artifacts_destination, experiment_id, run.info.run_id, "artifacts"
)
dest_path = os.path.join(run_artifact_root, "artifact_path", tmp_path.name)
assert os.path.exists(dest_path)
assert read_file(dest_path) == "0"
def test_log_artifacts(artifacts_server, tmp_path):
url = artifacts_server.url
mlflow.set_tracking_uri(url)
tmp_path.joinpath("a.txt").write_text("0")
d = tmp_path.joinpath("dir")
d.mkdir()
d.joinpath("b.txt").write_text("1")
with mlflow.start_run() as run:
mlflow.log_artifacts(tmp_path)
client = MlflowClient()
artifacts = [a.path for a in client.list_artifacts(run.info.run_id)]
assert sorted(artifacts) == ["a.txt", "dir"]
artifacts = [a.path for a in client.list_artifacts(run.info.run_id, "dir")]
assert artifacts == ["dir/b.txt"]
# With `artifact_path`
with mlflow.start_run() as run:
mlflow.log_artifacts(tmp_path, artifact_path="artifact_path")
artifacts = [a.path for a in client.list_artifacts(run.info.run_id)]
assert artifacts == ["artifact_path"]
artifacts = [a.path for a in client.list_artifacts(run.info.run_id, "artifact_path")]
assert sorted(artifacts) == ["artifact_path/a.txt", "artifact_path/dir"]
artifacts = [a.path for a in client.list_artifacts(run.info.run_id, "artifact_path/dir")]
assert artifacts == ["artifact_path/dir/b.txt"]
def test_list_artifacts(artifacts_server, tmp_path):
url = artifacts_server.url
mlflow.set_tracking_uri(url)
tmp_path_a = tmp_path.joinpath("a.txt")
tmp_path_a.write_text("0")
tmp_path_b = tmp_path.joinpath("b.txt")
tmp_path_b.write_text("1")
client = MlflowClient()
with mlflow.start_run() as run:
assert client.list_artifacts(run.info.run_id) == []
mlflow.log_artifact(tmp_path_a)
mlflow.log_artifact(tmp_path_b, "dir")
artifacts = [a.path for a in client.list_artifacts(run.info.run_id)]
assert sorted(artifacts) == ["a.txt", "dir"]
artifacts = [a.path for a in client.list_artifacts(run.info.run_id, "dir")]
assert artifacts == ["dir/b.txt"]
def test_download_artifacts(artifacts_server, tmp_path):
url = artifacts_server.url
mlflow.set_tracking_uri(url)
tmp_path_a = tmp_path.joinpath("a.txt")
tmp_path_a.write_text("0")
tmp_path_b = tmp_path.joinpath("b.txt")
tmp_path_b.write_text("1")
with mlflow.start_run() as run:
mlflow.log_artifact(tmp_path_a)
mlflow.log_artifact(tmp_path_b, "dir")
dest_path = download_artifacts(run_id=run.info.run_id, artifact_path="")
assert sorted(os.listdir(dest_path)) == ["a.txt", "dir"]
assert read_file(os.path.join(dest_path, "a.txt")) == "0"
dest_path = download_artifacts(run_id=run.info.run_id, artifact_path="dir")
assert os.listdir(dest_path) == ["b.txt"]
assert read_file(os.path.join(dest_path, "b.txt")) == "1"
def is_github_actions():
return "GITHUB_ACTIONS" in os.environ
@pytest.mark.skipif(is_windows(), reason="This example doesn't work on Windows")
def test_mlflow_artifacts_example(tmp_path):
root = pathlib.Path(mlflow.__file__).parents[1]
# On GitHub Actions, remove generated images to save disk space
rmi_option = "--rmi all" if is_github_actions() else ""
cmd = f"""
err=0
trap 'err=1' ERR
./build.sh
docker compose run -v ${{PWD}}/example.py:/app/example.py client python example.py
docker compose logs
docker compose down {rmi_option} --volumes --remove-orphans
test $err = 0
"""
script_path = tmp_path.joinpath("test.sh")
script_path.write_text(cmd)
subprocess.run(
["bash", script_path],
check=True,
cwd=os.path.join(root, "examples", "mlflow_artifacts"),
)
def test_rest_tracking_api_list_artifacts_with_proxied_artifacts(artifacts_server, tmp_path):
def list_artifacts_via_rest_api(url, run_id, path=None):
if path:
resp = requests.get(url, params={"run_id": run_id, "path": path})
else:
resp = requests.get(url, params={"run_id": run_id})
resp.raise_for_status()
return resp.json()
url = artifacts_server.url
mlflow.set_tracking_uri(url)
api = f"{url}/api/2.0/mlflow/artifacts/list"
tmp_path_a = tmp_path.joinpath("a.txt")
tmp_path_a.write_text("0")
tmp_path_b = tmp_path.joinpath("b.txt")
tmp_path_b.write_text("1")
mlflow.set_experiment("rest_list_api_test")
with mlflow.start_run() as run:
mlflow.log_artifact(tmp_path_a)
mlflow.log_artifact(tmp_path_b, "dir")
list_artifacts_response = list_artifacts_via_rest_api(url=api, run_id=run.info.run_id)
assert list_artifacts_response.get("files") == [
{"path": "a.txt", "is_dir": False, "file_size": 1},
{"path": "dir", "is_dir": True},
]
assert list_artifacts_response.get("root_uri") == run.info.artifact_uri
nested_list_artifacts_response = list_artifacts_via_rest_api(
url=api, run_id=run.info.run_id, path="dir"
)
assert nested_list_artifacts_response.get("files") == [
{"path": "dir/b.txt", "is_dir": False, "file_size": 1},
]
assert list_artifacts_response.get("root_uri") == run.info.artifact_uri
def test_rest_get_artifact_api_proxied_with_artifacts(artifacts_server, tmp_path):
url = artifacts_server.url
mlflow.set_tracking_uri(url)
tmp_path_a = tmp_path.joinpath("a.txt")
tmp_path_a.write_text("abcdefg")
mlflow.set_experiment("rest_get_artifact_api_test")
with mlflow.start_run() as run:
mlflow.log_artifact(tmp_path_a)
get_artifact_response = requests.get(
url=f"{url}/get-artifact", params={"run_id": run.info.run_id, "path": "a.txt"}
)
get_artifact_response.raise_for_status()
assert get_artifact_response.text == "abcdefg"
def test_rest_get_model_version_artifact_api_proxied_artifact_root(artifacts_server):
url = artifacts_server.url
artifact_file = pathlib.Path(artifacts_server.artifacts_destination, "a.txt")
artifact_file.parent.mkdir(exist_ok=True, parents=True)
artifact_file.write_text("abcdefg")
name = "GetModelVersionTest"
mlflow_client = MlflowClient(artifacts_server.backend_store_uri)
mlflow_client.create_registered_model(name)
# An artifact root with scheme http, https, or mlflow-artifacts is a proxied artifact root
mlflow_client.create_model_version(name, "mlflow-artifacts:", 1)
get_model_version_artifact_response = requests.get(
url=f"{url}/model-versions/get-artifact",
params={"name": name, "version": "1", "path": "a.txt"},
)
get_model_version_artifact_response.raise_for_status()
assert get_model_version_artifact_response.text == "abcdefg"
@pytest.mark.parametrize(
("filename", "expected_mime_type"),
[
("a.txt", "text/plain"),
("b.pkl", "application/octet-stream"),
("c.png", "image/png"),
("d.pdf", "application/pdf"),
("MLmodel", "text/plain"),
("mlproject", "text/plain"),
],
)
def test_mime_type_for_download_artifacts_api(
artifacts_server, tmp_path, filename, expected_mime_type
):
default_artifact_root = artifacts_server.default_artifact_root
url = artifacts_server.url
test_file = tmp_path.joinpath(filename)
test_file.touch()
upload_file(test_file, f"{default_artifact_root}/dir/{filename}")
download_response = download_file(f"{default_artifact_root}/dir/{filename}", test_file)
_, params = cgi.parse_header(download_response.headers["Content-Disposition"])
assert params["filename"] == filename
assert download_response.headers["Content-Type"] == expected_mime_type
mlflow.set_tracking_uri(url)
with mlflow.start_run() as run:
mlflow.log_artifact(test_file)
artifact_response = requests.get(
url=f"{url}/get-artifact", params={"run_id": run.info.run_id, "path": filename}
)
artifact_response.raise_for_status()
_, params = cgi.parse_header(artifact_response.headers["Content-Disposition"])
assert params["filename"] == filename
assert artifact_response.headers["Content-Type"] == expected_mime_type
assert artifact_response.headers["X-Content-Type-Options"] == "nosniff"
def test_rest_get_artifact_api_log_image(artifacts_server):
url = artifacts_server.url
mlflow.set_tracking_uri(url)
import numpy as np
from PIL import Image
image = np.random.randint(0, 256, size=(100, 100, 3), dtype=np.uint8)
with mlflow.start_run() as run:
mlflow.log_image(image, key="dog", step=100, timestamp=100, synchronous=True)
artifact_list_response = requests.get(
url=f"{url}/ajax-api/2.0/mlflow/artifacts/list",
params={"path": "images", "run_id": run.info.run_id},
)
artifact_list_response.raise_for_status()
for file in artifact_list_response.json()["files"]:
path = file["path"]
get_artifact_response = requests.get(
url=f"{url}/get-artifact", params={"run_id": run.info.run_id, "path": path}
)
get_artifact_response.raise_for_status()
assert (
"attachment; filename=dog%step%100%timestamp%100"
in get_artifact_response.headers["Content-Disposition"]
)
if path.endswith("png"):
loaded_image = np.asarray(
Image.open(BytesIO(get_artifact_response.content)), dtype=np.uint8
)
np.testing.assert_array_equal(loaded_image, image)
@pytest.mark.parametrize(
("filename", "requested_mime_type", "responded_mime_type"),
[
("b.pkl", "text/html", "application/octet-stream"),
("c.png", "text/html", "image/png"),
("d.pdf", "text/html", "application/pdf"),
],
)
def test_server_overrides_requested_mime_type(
artifacts_server, tmp_path, filename, requested_mime_type, responded_mime_type
):
default_artifact_root = artifacts_server.default_artifact_root
test_file = tmp_path.joinpath(filename)
test_file.touch()
upload_file(
test_file,
f"{default_artifact_root}/dir/{filename}",
)
download_response = download_file(
f"{default_artifact_root}/dir/{filename}",
test_file,
headers={"Accept": requested_mime_type},
)
_, params = cgi.parse_header(download_response.headers["Content-Disposition"])
assert params["filename"] == filename
assert download_response.headers["Content-Type"] == responded_mime_type
|
ArtifactsServer
|
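The tests above exercise the proxied artifact endpoint with a plain HTTP round-trip: PUT uploads bytes, GET reads them back, and a GET on the root lists them. Condensed into a standalone sketch (assumes a server already running and reachable at artifact_root, as the fixture provides):

import requests

def roundtrip(artifact_root: str) -> None:
    """Upload, download, and list one small artifact (illustrative sketch)."""
    upload = requests.put(f"{artifact_root}/demo/hello.txt", data=b"hello")
    upload.raise_for_status()
    download = requests.get(f"{artifact_root}/demo/hello.txt")
    download.raise_for_status()
    assert download.content == b"hello"
    # Listing the directory shows the uploaded file with its size.
    listing = requests.get(artifact_root, params={"path": "demo"}).json()
    assert listing == {"files": [{"path": "hello.txt", "is_dir": False, "file_size": 5}]}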
python
|
sympy__sympy
|
sympy/assumptions/predicates/calculus.py
|
{
"start": 1646,
"end": 1903
}
|
class ____(Predicate):
"""
Positive infinity predicate.
``Q.positive_infinite(x)`` is true iff ``x`` is positive infinity ``oo``.
"""
name = 'positive_infinite'
handler = Dispatcher("PositiveInfiniteHandler")
|
PositiveInfinitePredicate
|
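A minimal usage note for the predicate above, following sympy's standard ask/Q pattern (behavior as stated in the docstring):

from sympy import Q, Symbol, ask, oo

assert ask(Q.positive_infinite(oo)) is True
assert ask(Q.positive_infinite(-oo)) is False
assert ask(Q.positive_infinite(Symbol("x"))) is None  # unknown without assumptions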
python
|
PyCQA__pylint
|
pylint/pyreverse/inspector.py
|
{
"start": 1719,
"end": 2642
}
|
class ____:
"""A project handle a set of modules / packages."""
def __init__(self, name: str = ""):
self.name = name
self.uid: int | None = None
self.path: str = ""
self.modules: list[nodes.Module] = []
self.locals: dict[str, nodes.Module] = {}
self.__getitem__ = self.locals.__getitem__
self.__iter__ = self.locals.__iter__
self.values = self.locals.values
self.keys = self.locals.keys
self.items = self.locals.items
def add_module(self, node: nodes.Module) -> None:
self.locals[node.name] = node
self.modules.append(node)
def get_module(self, name: str) -> nodes.Module:
return self.locals[name]
def get_children(self) -> list[nodes.Module]:
return self.modules
def __repr__(self) -> str:
return f"<Project {self.name!r} at {id(self)} ({len(self.modules)} modules)>"
|
Project
|
python
|
realpython__materials
|
python-serialize/http-payload/fastapi-rest-api/main.py
|
{
"start": 187,
"end": 612
}
|
class ____(UserIn):
id: UUID = Field(default_factory=uuid4)
created_at: datetime = Field(default_factory=datetime.now)
users = [
UserOut(name="Alice"),
UserOut(name="Bob"),
]
@app.get("/users")
async def get_users():
return users
@app.post("/users", status_code=201)
async def create_user(user_in: UserIn):
user_out = UserOut(name=user_in.name)
users.append(user_out)
return user_out
|
UserOut
|
python
|
pyqtgraph__pyqtgraph
|
pyqtgraph/flowchart/library/Data.py
|
{
"start": 12801,
"end": 13164
}
|
class ____(CtrlNode):
"""Calculate the minimum of an array across an axis.
"""
nodeName = 'Min'
uiTemplate = [
('axis', 'intSpin', {'value': 0, 'min': -1, 'max': 1000000}),
]
def processData(self, data):
s = self.stateGroup.state()
ax = None if s['axis'] == -1 else s['axis']
return data.min(axis=ax)
|
Min
|
python
|
kamyu104__LeetCode-Solutions
|
Python/find-the-maximum-length-of-a-good-subsequence-i.py
|
{
"start": 63,
"end": 665
}
|
class ____(object):
def maximumLength(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
lookup = {x:i for i, x in enumerate(set(nums))}
dp = [[0]*len(lookup) for _ in xrange(k+1)]
result = [0]*(k+1)
for x in nums:
x = lookup[x]
for i in reversed(xrange(k+1)):
dp[i][x] = max(dp[i][x], result[i-1] if i-1 >= 0 else 0)+1
result[i] = max(result[i], dp[i][x])
return result[k]
# Time: O(n * k)
# Space: O(n * k)
import collections
# dp
|
Solution
|
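The dp above reads as: dp[i][x] is the length of the best good subsequence ending in value x with at most i adjacent mismatches, and result[i] tracks the best over all ending values. A Python 3 restatement checked against the problem's sample case (the class above uses xrange and is Python 2):

def maximum_length(nums: list, k: int) -> int:
    lookup = {x: i for i, x in enumerate(set(nums))}
    dp = [[0] * len(lookup) for _ in range(k + 1)]
    result = [0] * (k + 1)
    for x in nums:
        x = lookup[x]
        for i in reversed(range(k + 1)):
            dp[i][x] = max(dp[i][x], result[i - 1] if i - 1 >= 0 else 0) + 1
            result[i] = max(result[i], dp[i][x])
    return result[k]

assert maximum_length([1, 2, 1, 1, 3], 2) == 4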
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/math_ops_test.py
|
{
"start": 49967,
"end": 51418
}
|
class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
@test_util.run_all_in_graph_and_eager_modes
def testEqualityNone(self):
x = constant_op.constant([1.0, 2.0, 0.0, 4.0], dtype=dtypes.float32)
self.assertNotEqual(x, None)
self.assertNotEqual(None, x)
self.assertFalse(math_ops.tensor_equals(x, None))
self.assertTrue(math_ops.tensor_not_equals(x, None))
@parameterized.named_parameters(
(f"-is_equals={is_equals}-float_literal_type={type(float_literal)}" # pylint: disable=g-complex-comprehension
f"-float_literal={float_literal}", is_equals, float_literal)
for float_literal in [4.6, np.float32(4.6), 4.4, np.float32(4.4)]
for is_equals in [True, False])
def testEqualityNoDowncast(self, is_equals, float_literal):
if (tf2.enabled() and isinstance(float_literal, np.float32) or
not tf2.enabled() and isinstance(float_literal, float)):
# TODO(b/199262800): Remove this skip
self.skipTest("There is a bug in type promotion.")
if is_equals:
op = math_ops.tensor_equals
else:
op = math_ops.tensor_not_equals
x = constant_op.constant(4)
try:
result = op(x, float_literal)
if isinstance(result, tensor_lib.Tensor):
result = self.evaluate(result)
except TypeError:
# Throwing a TypeError is OK
return
self.assertEqual(result, not is_equals)
@test_util.run_all_in_graph_and_eager_modes
|
EqualityTest
|
python
|
kamyu104__LeetCode-Solutions
|
Python/shortest-matching-substring.py
|
{
"start": 1777,
"end": 3112
}
|
class ____(object):
def shortestMatchingSubstring(self, s, p):
"""
:type s: str
:type p: str
:rtype: int
"""
INF = float("inf")
def getPrefix(pattern):
prefix = [-1]*len(pattern)
j = -1
for i in xrange(1, len(pattern)):
while j+1 > 0 and pattern[j+1] != pattern[i]:
j = prefix[j]
if pattern[j+1] == pattern[i]:
j += 1
prefix[i] = j
return prefix
a, b, c = p.split('*')
n = len(s)
la, lb, lc = len(a), len(b), len(c)
prefix1 = getPrefix(a+'#'+s)
prefix2 = getPrefix(b+'#'+s)
prefix3 = getPrefix(c+'#'+s)
result = INF
i = j = k = 0
while i+lb+lc < n:
while i < n and prefix1[la+1+i]+1 != la:
i += 1
if i == n:
break
while j < n and not (j >= i+lb and prefix2[lb+1+j]+1 == lb):
j += 1
if j == n:
break
while k < n and not (k >= j+lc and prefix3[lc+1+k]+1 == lc):
k += 1
if k == n:
break
result = min(result, k-(i-la))
i += 1
return result if result != INF else -1
|
Solution2
|
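The getPrefix helper above is the KMP failure function with a -1-based convention: prefix[i] is the end index of the longest proper prefix of pattern[:i+1] that is also a suffix, or -1 if none. A Python 3 restatement with a worked value:

def get_prefix(pattern: str) -> list:
    prefix = [-1] * len(pattern)
    j = -1
    for i in range(1, len(pattern)):
        while j + 1 > 0 and pattern[j + 1] != pattern[i]:
            j = prefix[j]
        if pattern[j + 1] == pattern[i]:
            j += 1
        prefix[i] = j
    return prefix

# "ababc": prefixes "a" and "ab" repeat at positions 2 and 3; nothing matches at 4.
assert get_prefix("ababc") == [-1, -1, 0, 1, -1]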
python
|
huggingface__transformers
|
src/transformers/models/qwen2/modular_qwen2.py
|
{
"start": 5542,
"end": 5770
}
|
class ____(LlamaDecoderLayer):
def __init__(self, config: Qwen2Config, layer_idx: int):
super().__init__(config=config, layer_idx=layer_idx)
self.attention_type = config.layer_types[layer_idx]
|
Qwen2DecoderLayer
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pylint/no_self_use.py
|
{
"start": 703,
"end": 891
}
|
class ____(abc.ABC):
"""abstract class"""
@abstractmethod
def abstract_method(self):
"""abstract method could not be a function"""
raise NotImplementedError
|
Base
|
python
|
ansible__ansible
|
lib/ansible/modules/service.py
|
{
"start": 54413,
"end": 61981
}
|
class ____(Service):
"""
This is the AIX Service (SRC) manipulation class - it uses lssrc, startsrc, stopsrc
and refresh for service control. Enabling a service is currently not supported.
That would require adding an entry in the /etc/inittab file (mkitab, chitab
and rmitab commands).
"""
platform = 'AIX'
distribution = None
def get_service_tools(self):
self.lssrc_cmd = self.module.get_bin_path('lssrc', True)
if not self.lssrc_cmd:
self.module.fail_json(msg='unable to find lssrc binary')
self.startsrc_cmd = self.module.get_bin_path('startsrc', True)
if not self.startsrc_cmd:
self.module.fail_json(msg='unable to find startsrc binary')
self.stopsrc_cmd = self.module.get_bin_path('stopsrc', True)
if not self.stopsrc_cmd:
self.module.fail_json(msg='unable to find stopsrc binary')
self.refresh_cmd = self.module.get_bin_path('refresh', True)
if not self.refresh_cmd:
self.module.fail_json(msg='unable to find refresh binary')
def get_service_status(self):
status = self.get_aix_src_status()
# Only 'active' is considered properly running. Everything else is off
# or has some sort of problem.
if status == 'active':
self.running = True
else:
self.running = False
def get_aix_src_status(self):
# Check subsystem status
rc, stdout, stderr = self.execute_command("%s -s %s" % (self.lssrc_cmd, self.name))
if rc == 1:
# If check for subsystem is not ok, check if service name is a
# group subsystem
rc, stdout, stderr = self.execute_command("%s -g %s" % (self.lssrc_cmd, self.name))
if rc == 1:
if stderr:
self.module.fail_json(msg=stderr)
else:
self.module.fail_json(msg=stdout)
else:
# Check all subsystem statuses; if one subsystem is not active,
# the group is considered not active.
lines = stdout.splitlines()
for state in lines[1:]:
if state.split()[-1].strip() != "active":
status = state.split()[-1].strip()
break
else:
status = "active"
# status is one of: active, inoperative
return status
else:
lines = stdout.rstrip("\n").split("\n")
status = lines[-1].split(" ")[-1]
# status is one of: active, inoperative
return status
def service_control(self):
# Check if service name is a subsystem of a group subsystem
rc, stdout, stderr = self.execute_command("%s -a" % (self.lssrc_cmd))
if rc == 1:
if stderr:
self.module.fail_json(msg=stderr)
else:
self.module.fail_json(msg=stdout)
else:
lines = stdout.splitlines()
subsystems = []
groups = []
for line in lines[1:]:
subsystem = line.split()[0].strip()
group = line.split()[1].strip()
subsystems.append(subsystem)
if group:
groups.append(group)
# Determine which flag to pass for the service name:
# -s for a subsystem or -g for a group subsystem
if self.name in subsystems:
srccmd_parameter = "-s"
elif self.name in groups:
srccmd_parameter = "-g"
if self.action == 'start':
srccmd = self.startsrc_cmd
elif self.action == 'stop':
srccmd = self.stopsrc_cmd
elif self.action == 'reload':
srccmd = self.refresh_cmd
elif self.action == 'restart':
self.execute_command("%s %s %s" % (self.stopsrc_cmd, srccmd_parameter, self.name))
if self.sleep:
time.sleep(self.sleep)
srccmd = self.startsrc_cmd
if self.arguments and self.action in ('start', 'restart'):
return self.execute_command("%s -a \"%s\" %s %s" % (srccmd, self.arguments, srccmd_parameter, self.name))
else:
return self.execute_command("%s %s %s" % (srccmd, srccmd_parameter, self.name))
# ===========================================
# Main control flow
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
state=dict(type='str', choices=['started', 'stopped', 'reloaded', 'restarted']),
sleep=dict(type='int'),
pattern=dict(type='str'),
enabled=dict(type='bool'),
runlevel=dict(type='str', default='default'),
arguments=dict(type='str', default='', aliases=['args']),
),
supports_check_mode=True,
required_one_of=[['state', 'enabled']],
)
service = Service(module)
module.debug('Service instantiated - platform %s' % service.platform)
if service.distribution:
module.debug('Service instantiated - distribution %s' % service.distribution)
rc = 0
out = ''
err = ''
result = {}
result['name'] = service.name
# Find service management tools
service.get_service_tools()
# Enable/disable service startup at boot if requested
if service.module.params['enabled'] is not None:
# FIXME: ideally this should detect if we need to toggle the enablement state, though
# it's unlikely the changed handler would need to fire in this case so it's a minor thing.
service.service_enable()
result['enabled'] = service.enable
if module.params['state'] is None:
# Not changing the running state, so bail out now.
result['changed'] = service.changed
module.exit_json(**result)
result['state'] = service.state
# Collect service status
if service.pattern:
service.check_ps()
else:
service.get_service_status()
# Calculate if request will change service state
service.check_service_changed()
# Modify service state if necessary
(rc, out, err) = service.modify_service_state()
if rc != 0:
if err and "Job is already running" in err:
# upstart got confused, one such possibility is MySQL on Ubuntu 12.04
# where status may report it has no start/stop links and we could
# not get accurate status
pass
else:
if err:
module.fail_json(msg=err)
else:
module.fail_json(msg=out)
result['changed'] = service.changed | service.svc_change
if service.module.params['enabled'] is not None:
result['enabled'] = service.module.params['enabled']
if not service.module.params['state']:
status = service.get_service_status()
if status is None:
result['state'] = 'absent'
elif status is False:
result['state'] = 'started'
else:
result['state'] = 'stopped'
else:
# as we may have just bounced the service, the service command may not
# report accurate state at this moment, so just show what we ran
if service.module.params['state'] in ['reloaded', 'restarted', 'started']:
result['state'] = 'started'
else:
result['state'] = 'stopped'
module.exit_json(**result)
if __name__ == '__main__':
main()
|
AIX
|
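The status check in get_aix_src_status above boils down to taking the last whitespace-separated token of an lssrc output line. A sketch of that extraction against typical output (the sample text below is illustrative; real lssrc columns are Subsystem/Group/PID/Status):

SAMPLE = """Subsystem         Group            PID          Status
 sshd              ssh              123456       active
"""

def last_status(stdout: str) -> str:
    lines = stdout.rstrip("\n").split("\n")
    return lines[-1].split(" ")[-1]  # same token extraction as the class above

assert last_status(SAMPLE) == "active"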
python
|
spyder-ide__spyder
|
spyder/plugins/profiler/widgets/main_widget.py
|
{
"start": 847,
"end": 1309
}
|
class ____:
# Triggers
Clear = 'clear_action'
Collapse = 'collapse_action'
Expand = 'expand_action'
CallersOrCallees = "callers_or_callees_action"
ToggleBuiltins = "toggle_builtins_action"
Home = "HomeAction"
SlowLocal = 'slow_local_action'
LoadData = 'load_data_action'
SaveData = 'save_data_action'
Search = "find_action"
Undo = "undo_action"
Redo = "redo_action"
Stop = "stop_action"
|
ProfilerWidgetActions
|
python
|
pyinstaller__pyinstaller
|
PyInstaller/lib/modulegraph/modulegraph.py
|
{
"start": 23279,
"end": 23473
}
|
class ____(Node):
def __init__(self, filename):
super(Script, self).__init__(filename)
self.filename = filename
def infoTuple(self):
return (self.filename,)
|
Script
|
python
|
great-expectations__great_expectations
|
great_expectations/metrics/metric.py
|
{
"start": 517,
"end": 755
}
|
class ____(TypeError):
def __init__(self, class_name: str, mixin_superclass_name: str) -> None:
super().__init__(
f"`{class_name}` must use a single `{mixin_superclass_name}` subclass mixin."
)
|
MixinTypeError
|
python
|
astropy__astropy
|
astropy/table/tests/test_init_table.py
|
{
"start": 10922,
"end": 12500
}
|
class ____(BaseInitFromDictLike):
def _setup(self, table_type):
self.data = np.array(
[(1, 2, 3), (3, 4, 5)], dtype=[("x", "i8"), ("y", "i4"), ("z", "i8")]
)
def test_ndarray_ref(self, table_type):
"""Init with ndarray and copy=False and show that table uses reference
to input ndarray"""
self._setup(table_type)
t = table_type(self.data, copy=False)
t["x"][1] = 0 # Column-wise assignment
t[0]["y"] = 0 # Row-wise assignment
assert self.data["x"][1] == 0
assert self.data["y"][0] == 0
assert np.all(np.array(t) == self.data)
assert all(t[name].name == name for name in t.colnames)
def test_partial_names_dtype(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=["e", None, "d"], dtype=["f4", None, "f8"])
assert t.colnames == ["e", "y", "d"]
assert t["e"].dtype.type == np.float32
assert t["y"].dtype.type == np.int32
assert t["d"].dtype.type == np.float64
assert all(t[name].name == name for name in t.colnames)
def test_partial_names_ref(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=["e", None, "d"], copy=False)
assert t.colnames == ["e", "y", "d"]
assert t["e"].dtype.type == np.int64
assert t["y"].dtype.type == np.int32
assert t["d"].dtype.type == np.int64
assert all(t[name].name == name for name in t.colnames)
@pytest.mark.usefixtures("table_type")
|
TestInitFromNdarrayStruct
|
python
|
pypa__setuptools
|
setuptools/_vendor/wheel/vendored/packaging/specifiers.py
|
{
"start": 2843,
"end": 26453
}
|
class ____(BaseSpecifier):
"""This class abstracts handling of version specifiers.
.. tip::
It is generally not required to instantiate this manually. You should
prefer to work with :class:`SpecifierSet` instead, which can parse
comma-separated version specifiers (which is what package metadata contains).
"""
_operator_regex_str = r"""
(?P<operator>(~=|==|!=|<=|>=|<|>|===))
"""
_version_regex_str = r"""
(?P<version>
(?:
# The identity operators allow for an escape hatch that will
# do an exact string match of the version you wish to install.
# This will not be parsed by PEP 440 and we cannot determine
# any semantic meaning from it. This operator is discouraged
# but included entirely as an escape hatch.
(?<====) # Only match for the identity operator
\s*
[^\s;)]* # The arbitrary version can be just about anything,
# we match everything except for whitespace, a
# semi-colon for marker support, and a closing paren
# since versions can be enclosed in them.
)
|
(?:
# The (non)equality operators allow for wild card and local
# versions to be specified so we have to define these two
# operators separately to enable that.
(?<===|!=) # Only match for equals and not equals
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
# You cannot use a wild card and a pre-release, post-release, a dev or
# local version together so group them with a | and make them optional.
(?:
\.\* # Wild card syntax of .*
|
(?: # pre release
[-_\.]?
(alpha|beta|preview|pre|a|b|c|rc)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
(?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
)?
)
|
(?:
# The compatible operator requires at least two digits in the
# release segment.
(?<=~=) # Only match for the compatible operator
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
(?: # pre release
[-_\.]?
(alpha|beta|preview|pre|a|b|c|rc)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
|
(?:
# All other operators only allow a sub set of what the
# (non)equality operators do. Specifically they do not allow
# local versions to be specified nor do they allow the prefix
# matching wild cards.
(?<!==|!=|~=) # We have special cases for these
# operators so we want to make sure they
# don't match here.
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(alpha|beta|preview|pre|a|b|c|rc)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
)
"""
_regex = re.compile(
r"^\s*" + _operator_regex_str + _version_regex_str + r"\s*$",
re.VERBOSE | re.IGNORECASE,
)
_operators = {
"~=": "compatible",
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
"===": "arbitrary",
}
def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
"""Initialize a Specifier instance.
:param spec:
The string representation of a specifier which will be parsed and
normalized before use.
:param prereleases:
This tells the specifier if it should accept prerelease versions if
applicable or not. The default of ``None`` will autodetect it from the
given specifiers.
:raises InvalidSpecifier:
If the given specifier is invalid (i.e. bad syntax).
"""
match = self._regex.search(spec)
if not match:
raise InvalidSpecifier(f"Invalid specifier: '{spec}'")
self._spec: Tuple[str, str] = (
match.group("operator").strip(),
match.group("version").strip(),
)
# Store whether or not this Specifier should accept prereleases
self._prereleases = prereleases
# https://github.com/python/mypy/pull/13475#pullrequestreview-1079784515
@property # type: ignore[override]
def prereleases(self) -> bool:
# If there is an explicit prereleases set for this, then we'll just
# blindly use that.
if self._prereleases is not None:
return self._prereleases
# Look at all of our specifiers and determine if they are inclusive
# operators, and if they are if they are including an explicit
# prerelease.
operator, version = self._spec
if operator in ["==", ">=", "<=", "~=", "==="]:
            # The == specifier can include a trailing .*; if it does, we
            # want to remove it before parsing.
if operator == "==" and version.endswith(".*"):
version = version[:-2]
            # Parse the version, and if it is a pre-release, then this
            # specifier allows pre-releases.
if Version(version).is_prerelease:
return True
return False
@prereleases.setter
def prereleases(self, value: bool) -> None:
self._prereleases = value
@property
def operator(self) -> str:
"""The operator of this specifier.
>>> Specifier("==1.2.3").operator
'=='
"""
return self._spec[0]
@property
def version(self) -> str:
"""The version of this specifier.
>>> Specifier("==1.2.3").version
'1.2.3'
"""
return self._spec[1]
def __repr__(self) -> str:
"""A representation of the Specifier that shows all internal state.
>>> Specifier('>=1.0.0')
<Specifier('>=1.0.0')>
>>> Specifier('>=1.0.0', prereleases=False)
<Specifier('>=1.0.0', prereleases=False)>
>>> Specifier('>=1.0.0', prereleases=True)
<Specifier('>=1.0.0', prereleases=True)>
"""
pre = (
f", prereleases={self.prereleases!r}"
if self._prereleases is not None
else ""
)
return f"<{self.__class__.__name__}({str(self)!r}{pre})>"
def __str__(self) -> str:
"""A string representation of the Specifier that can be round-tripped.
>>> str(Specifier('>=1.0.0'))
'>=1.0.0'
>>> str(Specifier('>=1.0.0', prereleases=False))
'>=1.0.0'
"""
return "{}{}".format(*self._spec)
@property
def _canonical_spec(self) -> Tuple[str, str]:
canonical_version = canonicalize_version(
self._spec[1],
strip_trailing_zero=(self._spec[0] != "~="),
)
return self._spec[0], canonical_version
def __hash__(self) -> int:
return hash(self._canonical_spec)
def __eq__(self, other: object) -> bool:
"""Whether or not the two Specifier-like objects are equal.
:param other: The other object to check against.
The value of :attr:`prereleases` is ignored.
>>> Specifier("==1.2.3") == Specifier("== 1.2.3.0")
True
>>> (Specifier("==1.2.3", prereleases=False) ==
... Specifier("==1.2.3", prereleases=True))
True
>>> Specifier("==1.2.3") == "==1.2.3"
True
>>> Specifier("==1.2.3") == Specifier("==1.2.4")
False
>>> Specifier("==1.2.3") == Specifier("~=1.2.3")
False
"""
if isinstance(other, str):
try:
other = self.__class__(str(other))
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._canonical_spec == other._canonical_spec
def _get_operator(self, op: str) -> CallableOperator:
operator_callable: CallableOperator = getattr(
self, f"_compare_{self._operators[op]}"
)
return operator_callable
def _compare_compatible(self, prospective: Version, spec: str) -> bool:
# Compatible releases have an equivalent combination of >= and ==. That
# is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
# implement this in terms of the other specifiers instead of
# implementing it ourselves. The only thing we need to do is construct
# the other specifiers.
# We want everything but the last item in the version, but we want to
# ignore suffix segments.
prefix = _version_join(
list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
)
# Add the prefix notation to the end of our string
prefix += ".*"
return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
prospective, prefix
)
def _compare_equal(self, prospective: Version, spec: str) -> bool:
# We need special logic to handle prefix matching
if spec.endswith(".*"):
            # In the case of prefix matching we want to ignore the local segment.
normalized_prospective = canonicalize_version(
prospective.public, strip_trailing_zero=False
)
# Get the normalized version string ignoring the trailing .*
normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False)
# Split the spec out by bangs and dots, and pretend that there is
# an implicit dot in between a release segment and a pre-release segment.
split_spec = _version_split(normalized_spec)
# Split the prospective version out by bangs and dots, and pretend
# that there is an implicit dot in between a release segment and
# a pre-release segment.
split_prospective = _version_split(normalized_prospective)
# 0-pad the prospective version before shortening it to get the correct
# shortened version.
padded_prospective, _ = _pad_version(split_prospective, split_spec)
# Shorten the prospective version to be the same length as the spec
# so that we can determine if the specifier is a prefix of the
# prospective version or not.
shortened_prospective = padded_prospective[: len(split_spec)]
return shortened_prospective == split_spec
else:
# Convert our spec string into a Version
spec_version = Version(spec)
# If the specifier does not have a local segment, then we want to
# act as if the prospective version also does not have a local
# segment.
if not spec_version.local:
prospective = Version(prospective.public)
return prospective == spec_version
def _compare_not_equal(self, prospective: Version, spec: str) -> bool:
return not self._compare_equal(prospective, spec)
def _compare_less_than_equal(self, prospective: Version, spec: str) -> bool:
# NB: Local version identifiers are NOT permitted in the version
# specifier, so local version labels can be universally removed from
# the prospective version.
return Version(prospective.public) <= Version(spec)
def _compare_greater_than_equal(self, prospective: Version, spec: str) -> bool:
# NB: Local version identifiers are NOT permitted in the version
# specifier, so local version labels can be universally removed from
# the prospective version.
return Version(prospective.public) >= Version(spec)
def _compare_less_than(self, prospective: Version, spec_str: str) -> bool:
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec_str)
# Check to see if the prospective version is less than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective < spec:
return False
        # This special case is here so that, unless the specifier itself
        # includes a pre-release version, we do not accept pre-release
        # versions for the version mentioned in the specifier (e.g. <3.1 should
        # not match 3.1.dev0, but should match 3.0.dev0).
if not spec.is_prerelease and prospective.is_prerelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# less than the spec version *and* it's not a pre-release of the same
# version in the spec.
return True
def _compare_greater_than(self, prospective: Version, spec_str: str) -> bool:
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec_str)
# Check to see if the prospective version is greater than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective > spec:
return False
        # This special case is here so that, unless the specifier itself
        # includes a post-release version, we do not accept
        # post-release versions for the version mentioned in the specifier
        # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
if not spec.is_postrelease and prospective.is_postrelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# Ensure that we do not allow a local version of the version mentioned
# in the specifier, which is technically greater than, to match.
if prospective.local is not None:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# greater than the spec version *and* it's not a pre-release of the
# same version in the spec.
return True
def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
return str(prospective).lower() == str(spec).lower()
def __contains__(self, item: Union[str, Version]) -> bool:
"""Return whether or not the item is contained in this specifier.
:param item: The item to check for.
This is used for the ``in`` operator and behaves the same as
:meth:`contains` with no ``prereleases`` argument passed.
>>> "1.2.3" in Specifier(">=1.2.3")
True
>>> Version("1.2.3") in Specifier(">=1.2.3")
True
>>> "1.0.0" in Specifier(">=1.2.3")
False
>>> "1.3.0a1" in Specifier(">=1.2.3")
False
>>> "1.3.0a1" in Specifier(">=1.2.3", prereleases=True)
True
"""
return self.contains(item)
def contains(
self, item: UnparsedVersion, prereleases: Optional[bool] = None
) -> bool:
"""Return whether or not the item is contained in this specifier.
:param item:
The item to check for, which can be a version string or a
:class:`Version` instance.
:param prereleases:
Whether or not to match prereleases with this Specifier. If set to
``None`` (the default), it uses :attr:`prereleases` to determine
whether or not prereleases are allowed.
>>> Specifier(">=1.2.3").contains("1.2.3")
True
>>> Specifier(">=1.2.3").contains(Version("1.2.3"))
True
>>> Specifier(">=1.2.3").contains("1.0.0")
False
>>> Specifier(">=1.2.3").contains("1.3.0a1")
False
>>> Specifier(">=1.2.3", prereleases=True).contains("1.3.0a1")
True
>>> Specifier(">=1.2.3").contains("1.3.0a1", prereleases=True)
True
"""
# Determine if prereleases are to be allowed or not.
if prereleases is None:
prereleases = self.prereleases
# Normalize item to a Version, this allows us to have a shortcut for
# "2.0" in Specifier(">=2")
normalized_item = _coerce_version(item)
        # Determine if we should be supporting prereleases in this specifier
        # or not. If we do not support prereleases, then we can short-circuit
        # the logic if this version is a prerelease.
if normalized_item.is_prerelease and not prereleases:
return False
# Actually do the comparison to determine if this item is contained
# within this Specifier or not.
operator_callable: CallableOperator = self._get_operator(self.operator)
return operator_callable(normalized_item, self.version)
def filter(
self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None
) -> Iterator[UnparsedVersionVar]:
"""Filter items in the given iterable, that match the specifier.
:param iterable:
An iterable that can contain version strings and :class:`Version` instances.
The items in the iterable will be filtered according to the specifier.
:param prereleases:
Whether or not to allow prereleases in the returned iterator. If set to
            ``None`` (the default), it will intelligently decide whether to allow
prereleases or not (based on the :attr:`prereleases` attribute, and
whether the only versions matching are prereleases).
This method is smarter than just ``filter(Specifier().contains, [...])``
because it implements the rule from :pep:`440` that a prerelease item
SHOULD be accepted if no other versions match the given specifier.
>>> list(Specifier(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
['1.3']
>>> list(Specifier(">=1.2.3").filter(["1.2", "1.2.3", "1.3", Version("1.4")]))
['1.2.3', '1.3', <Version('1.4')>]
>>> list(Specifier(">=1.2.3").filter(["1.2", "1.5a1"]))
['1.5a1']
>>> list(Specifier(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
['1.3', '1.5a1']
>>> list(Specifier(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
['1.3', '1.5a1']
"""
yielded = False
found_prereleases = []
kw = {"prereleases": prereleases if prereleases is not None else True}
# Attempt to iterate over all the values in the iterable and if any of
# them match, yield them.
for version in iterable:
parsed_version = _coerce_version(version)
if self.contains(parsed_version, **kw):
# If our version is a prerelease, and we were not set to allow
# prereleases, then we'll store it for later in case nothing
# else matches this specifier.
if parsed_version.is_prerelease and not (
prereleases or self.prereleases
):
found_prereleases.append(version)
# Either this is not a prerelease, or we should have been
# accepting prereleases from the beginning.
else:
yielded = True
yield version
# Now that we've iterated over everything, determine if we've yielded
# any values, and if we have not and we have any prereleases stored up
# then we will go ahead and yield the prereleases.
if not yielded and found_prereleases:
for version in found_prereleases:
yield version
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
def _version_split(version: str) -> List[str]:
"""Split version into components.
The split components are intended for version comparison. The logic does
not attempt to retain the original version string, so joining the
components back with :func:`_version_join` may not produce the original
version string.
"""
result: List[str] = []
epoch, _, rest = version.rpartition("!")
result.append(epoch or "0")
for item in rest.split("."):
match = _prefix_regex.search(item)
if match:
result.extend(match.groups())
else:
result.append(item)
return result
def _version_join(components: List[str]) -> str:
"""Join split version components into a version string.
This function assumes the input came from :func:`_version_split`, where the
first component must be the epoch (either empty or numeric), and all other
components numeric.
"""
epoch, *rest = components
return f"{epoch}!{'.'.join(rest)}"
def _is_not_suffix(segment: str) -> bool:
return not any(
segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
)
def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
left_split, right_split = [], []
# Get the release segment of our versions
left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
# Get the rest of our versions
left_split.append(left[len(left_split[0]) :])
right_split.append(right[len(right_split[0]) :])
# Insert our padding
left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
return (
list(itertools.chain.from_iterable(left_split)),
list(itertools.chain.from_iterable(right_split)),
)
|
Specifier
|
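A minimal usage sketch of the Specifier class above, assuming the `packaging` distribution is installed (the import path below is the public one, not the vendored copy):

from packaging.specifiers import Specifier
from packaging.version import Version
spec = Specifier(">=1.2.3")
assert spec.contains("1.3.0")                       # plain containment check
assert not spec.contains("1.3.0a1")                 # prereleases excluded by default
assert spec.contains("1.3.0a1", prereleases=True)   # opt in explicitly
assert Version("1.4") in spec                       # __contains__ delegates to contains()
assert list(spec.filter(["1.2", "1.3", "1.5a1"])) == ["1.3"]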
python
|
aio-libs__aiohttp
|
aiohttp/web_exceptions.py
|
{
"start": 6285,
"end": 6424
}
|
class ____(HTTPMove):
status_code = 302
# This one is safe after a POST (the redirected location will be
# retrieved with GET):
|
HTTPFound
|
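A short aiohttp handler sketch showing how the exception above is typically raised (the handler and path are hypothetical):

from aiohttp import web
async def handler(request: web.Request) -> web.Response:
    # 302 redirect; the client re-requests the new location with GET.
    raise web.HTTPFound(location="/new-location")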
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/linalg/linear_operator_addition_test.py
|
{
"start": 1121,
"end": 1442
}
|
class ____(linear_operator_addition._Adder):
"""Adder that will fail if used."""
def can_add(self, op1, op2):
raise AssertionError("BadAdder.can_add called!")
def _add(self, op1, op2, operator_name, hints):
raise AssertionError("This line should not be reached")
# pylint: enable=unused-argument
|
_BadAdder
|
python
|
Pylons__pyramid
|
src/pyramid/authentication.py
|
{
"start": 42497,
"end": 43451
}
|
class ____:
"""A helper for use with a :term:`security policy` which stores user data
in the configured :term:`session`.
Constructor Arguments
``prefix``
A prefix used when storing the authentication parameters in the
session. Defaults to 'auth.'. Optional.
"""
def __init__(self, prefix='auth.'):
self.userid_key = prefix + 'userid'
def remember(self, request, userid, **kw):
"""Store a userid in the session."""
request.session[self.userid_key] = userid
return []
def forget(self, request, **kw):
"""Remove the stored userid from the session."""
if self.userid_key in request.session:
del request.session[self.userid_key]
return []
def authenticated_userid(self, request):
"""Return the stored userid."""
return request.session.get(self.userid_key)
@implementer(IAuthenticationPolicy)
|
SessionAuthenticationHelper
|
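A hedged sketch of wiring the helper above into a custom security policy; `MySecurityPolicy` is a hypothetical name used only for illustration:

from pyramid.authentication import SessionAuthenticationHelper
class MySecurityPolicy:
    def __init__(self):
        self.helper = SessionAuthenticationHelper()
    def identity(self, request):
        return self.helper.authenticated_userid(request)
    def remember(self, request, userid, **kw):
        return self.helper.remember(request, userid, **kw)
    def forget(self, request, **kw):
        return self.helper.forget(request, **kw)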
python
|
pytorch__pytorch
|
benchmarks/tensorexpr/elementwise.py
|
{
"start": 5619,
"end": 6981
}
|
class ____(benchmark.Benchmark):
def __init__(self, mode, device, dtype, N):
super().__init__(mode, device, dtype)
self.N = N
self.data = self.rand(
[N], device=device, dtype=dtype, requires_grad=self.requires_grad
)
self.inputs = [self.data]
def forward(self, data):
a = data + 0.001
b = a + 0.002
return b
def reference(self):
binary_op = self.__class__.binary_op_np_func
unary_op = self.__class__.unary_op_np_func
[d1, d2, d3, d4] = [self.numpy(d) for d in [self.d1, self.d2, self.d3, self.d4]]
return self._eval(d1, d2, d3, d4, binary_op, unary_op)
def config(self):
return [self.N]
@staticmethod
def input_iterable():
return True
@classmethod
def module(cls):
return "simple_element"
def memory_workload(self):
if self.mode == "fwd":
sol_count = 2
algorithmic_count = 2
else:
sol_count = 2
algorithmic_count = 2
buffer_size = self.N
return {
"sol": buffer_size * sol_count,
"algorithmic": buffer_size * algorithmic_count,
}
@staticmethod
def default_configs():
return [[1 << 25]]
benchmark.register_benchmark_class(SimpleElementBench)
|
SimpleElementBench
|
python
|
spack__spack
|
lib/spack/spack/test/util/package_hash.py
|
{
"start": 11398,
"end": 11816
}
|
class ____:
def foo(self):
print("ONE")
@when("@1.0")
def foo(self):
print("TWO")
@when("@2.0")
@when(sys.platform == "darwin")
def foo(self):
print("THREE")
@when("@3.0")
def foo(self):
print("FOUR")
# this one should always stay
@run_after("install")
def some_function(self):
print("FIVE")
"""
more_dynamic_multimethods = """\
|
Pkg
|
python
|
instagram__MonkeyType
|
tests/util.py
|
{
"start": 1487,
"end": 2127
}
|
class ____:
class Inner:
def f(self) -> None:
pass
def transform_path(path: str) -> str:
"""Transform tests/test_foo.py to monkeytype.foo"""
path = 'monkeytype/' + path[len('tests/'):]
*basepath, file_name = path.split('/')
basename, _ext = os.path.splitext(file_name[len('test_'):])
return '.'.join(basepath + [basename])
def smartcov_paths_hook(paths: List[str]) -> List[str]:
"""Given list of test files to run, return modules to measure coverage of."""
if not paths:
return ['monkeytype']
return [
transform_path(path)
for path
in paths
]
|
Outer
|
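Worked examples of the path-mapping helpers above (run alongside the definitions; the test paths are hypothetical):

assert transform_path("tests/test_foo.py") == "monkeytype.foo"
assert transform_path("tests/sub/test_encoder.py") == "monkeytype.sub.encoder"
assert smartcov_paths_hook([]) == ["monkeytype"]  # no paths: cover the whole package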
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/steps/python_registry.py
|
{
"start": 653,
"end": 8585
}
|
class ____(Step):
context: PythonRegistryPublishContext
title = "Publish package to python registry"
max_retries = 3
def _get_base_container(self) -> Container:
return with_poetry(self.context)
async def _get_package_metadata_from_pyproject_toml(self, package_dir_to_publish: Directory) -> Optional[PythonPackageMetadata]:
pyproject_toml = package_dir_to_publish.file(PYPROJECT_TOML_FILE_PATH)
pyproject_toml_content = await pyproject_toml.contents()
contents = tomllib.loads(pyproject_toml_content)
try:
return PythonPackageMetadata(contents["tool"]["poetry"]["name"], contents["tool"]["poetry"]["version"])
except KeyError:
return None
async def _get_package_type(self, package_dir_to_publish: Directory) -> Optional[PackageType]:
files = await package_dir_to_publish.entries()
has_pyproject_toml = PYPROJECT_TOML_FILE_PATH in files
has_setup_py = SETUP_PY_FILE_PATH in files
if has_pyproject_toml:
return PackageType.POETRY
elif has_setup_py:
return PackageType.PIP
else:
return None
async def _run(self) -> StepResult:
package_dir_to_publish = await self.context.get_repo_dir(self.context.package_path)
package_type = await self._get_package_type(package_dir_to_publish)
if not package_type:
return self.skip("Connector does not have a pyproject.toml file or setup.py file, skipping.")
result = await self._ensure_package_name_and_version(package_dir_to_publish, package_type)
if result:
return result
self.logger.info(
f"Uploading package {self.context.package_metadata.name} version {self.context.package_metadata.version} to {self.context.registry}..."
)
return await self._publish(package_dir_to_publish, package_type)
async def _ensure_package_name_and_version(self, package_dir_to_publish: Directory, package_type: PackageType) -> Optional[StepResult]:
"""
        Try to infer the package name and version from the pyproject.toml file. If it is not present, the package name and version must already be set.
        Setup.py packages must pass the package name and version as parameters.
Returns None if package name and version are set, otherwise a StepResult with a skip message.
"""
if self.context.package_metadata.name and self.context.package_metadata.version:
return None
if package_type is not PackageType.POETRY:
return self.skip("Connector does not have a pyproject.toml file and version and package name is not set otherwise, skipping.")
inferred_package_metadata = await self._get_package_metadata_from_pyproject_toml(package_dir_to_publish)
if not inferred_package_metadata:
return self.skip(
"Connector does not have a pyproject.toml file which specifies package name and version and they are not set otherwise, skipping."
)
if not self.context.package_metadata.name:
self.context.package_metadata.name = inferred_package_metadata.name
if not self.context.package_metadata.version:
self.context.package_metadata.version = inferred_package_metadata.version
return None
async def _publish(self, package_dir_to_publish: Directory, package_type: PackageType) -> StepResult:
if package_type is PackageType.PIP:
return await self._pip_publish(package_dir_to_publish)
else:
return await self._poetry_publish(package_dir_to_publish)
async def _poetry_publish(self, package_dir_to_publish: Directory) -> StepResult:
pyproject_toml = package_dir_to_publish.file(PYPROJECT_TOML_FILE_PATH)
pyproject_toml_content = await pyproject_toml.contents()
contents = tomllib.loads(pyproject_toml_content)
# make sure package name and version are set to the configured one
contents["tool"]["poetry"]["name"] = self.context.package_metadata.name
contents["tool"]["poetry"]["version"] = self.context.package_metadata.version
# enforce consistent author
contents["tool"]["poetry"]["authors"] = ["Airbyte <contact@airbyte.io>"]
poetry_publish = (
self._get_base_container()
.with_secret_variable("PYTHON_REGISTRY_TOKEN", self.context.python_registry_token.as_dagger_secret(self.dagger_client))
.with_directory("package", package_dir_to_publish)
.with_workdir("package")
.with_new_file(PYPROJECT_TOML_FILE_PATH, contents=tomli_w.dumps(contents))
# Make sure these steps are always executed and not cached as they are triggering a side-effect (calling the registry)
# Env var setting needs to be in this block as well to make sure a change of the env var will be propagated correctly
.with_env_variable("CACHEBUSTER", str(uuid.uuid4()))
.with_exec(["poetry", "config", "repositories.mypypi", self.context.registry], use_entrypoint=True)
.with_exec(sh_dash_c(["poetry config pypi-token.mypypi $PYTHON_REGISTRY_TOKEN"]))
            # The default timeout is 15 seconds.
            # We sometimes face HTTP read timeouts (over port 443) from PyPI,
            # so set it to 60 seconds to avoid transient publish failures.
.with_env_variable("POETRY_REQUESTS_TIMEOUT", "60")
.with_exec(sh_dash_c(["poetry publish --build --repository mypypi -vvv --no-interaction"]))
)
return await self.get_step_result(poetry_publish)
async def _pip_publish(self, package_dir_to_publish: Directory) -> StepResult:
files = await package_dir_to_publish.entries()
metadata: Dict[str, str] = {
"name": str(self.context.package_metadata.name),
"version": str(self.context.package_metadata.version),
# Enforce consistent author
"author": "Airbyte",
"author_email": "contact@airbyte.io",
}
if "README.md" in files:
metadata["long_description"] = await package_dir_to_publish.file("README.md").contents()
metadata["long_description_content_type"] = "text/markdown"
config = configparser.ConfigParser()
config["metadata"] = metadata
setup_cfg_io = io.StringIO()
config.write(setup_cfg_io)
setup_cfg = setup_cfg_io.getvalue()
twine_upload = (
self._get_base_container()
.with_exec(sh_dash_c(["apt-get update", "apt-get install -y twine"]))
.with_directory("package", package_dir_to_publish)
.with_workdir("package")
.with_exec(["sed", "-i", "/name=/d; /author=/d; /author_email=/d; /version=/d", SETUP_PY_FILE_PATH], use_entrypoint=True)
.with_new_file("setup.cfg", contents=setup_cfg)
.with_exec(["pip", "install", "--upgrade", "setuptools", "wheel"], use_entrypoint=True)
.with_exec(["python", SETUP_PY_FILE_PATH, "sdist", "bdist_wheel"], use_entrypoint=True)
# Make sure these steps are always executed and not cached as they are triggering a side-effect (calling the registry)
# Env var setting needs to be in this block as well to make sure a change of the env var will be propagated correctly
.with_env_variable("CACHEBUSTER", str(uuid.uuid4()))
.with_secret_variable("TWINE_USERNAME", self.context.dagger_client.set_secret("pypi_username", "__token__"))
.with_secret_variable("TWINE_PASSWORD", self.context.python_registry_token.as_dagger_secret(self.dagger_client))
.with_exec(["twine", "upload", "--verbose", "--repository-url", self.context.registry, "dist/*"], use_entrypoint=True)
)
return await self.get_step_result(twine_upload)
|
PublishToPythonRegistry
|
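A standalone sketch of the setup.cfg generation used in `_pip_publish` above; the metadata values are hypothetical:

import configparser
import io
metadata = {
    "name": "example-package",  # hypothetical
    "version": "0.1.0",         # hypothetical
    "author": "Airbyte",
    "author_email": "contact@airbyte.io",
}
config = configparser.ConfigParser()
config["metadata"] = metadata
buf = io.StringIO()
config.write(buf)
print(buf.getvalue())  # INI-formatted [metadata] section consumed by setuptools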
python
|
realpython__materials
|
dwitter-part-1/source_code_step_02/dwitter/models.py
|
{
"start": 75,
"end": 344
}
|
class ____(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
follows = models.ManyToManyField(
"self", related_name="followed_by", symmetrical=False, blank=True
)
def __str__(self):
return self.user.username
|
Profile
|
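A hedged Django-shell sketch of the model above; assumes migrations are applied and uses hypothetical usernames:

from django.contrib.auth.models import User
from dwitter.models import Profile
alice = User.objects.create_user("alice")
bob = User.objects.create_user("bob")
alice_profile = Profile.objects.create(user=alice)
bob_profile = Profile.objects.create(user=bob)
alice_profile.follows.add(bob_profile)  # asymmetrical: bob does not follow back
assert alice_profile in bob_profile.followed_by.all()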
python
|
rapidsai__cudf
|
python/cudf_polars/cudf_polars/dsl/translate.py
|
{
"start": 6676,
"end": 37698
}
|
class ____(AbstractContextManager[None]):
__slots__ = ("_prev", "ctx", "translator")
def __init__(self, translator: Translator, ctx: ExecutionContext) -> None:
self.translator = translator
self.ctx = ctx
self._prev: ExecutionContext | None = None
def __enter__(self) -> None:
self._prev = self.translator._expr_context
self.translator._expr_context = self.ctx
def __exit__(self, *args: Any) -> None:
assert self._prev is not None
self.translator._expr_context = self._prev
@singledispatch
def _translate_ir(node: Any, translator: Translator, schema: Schema) -> ir.IR:
raise NotImplementedError(
f"Translation for {type(node).__name__}"
) # pragma: no cover
@_translate_ir.register
def _(node: plrs._ir_nodes.PythonScan, translator: Translator, schema: Schema) -> ir.IR:
scan_fn, with_columns, source_type, predicate, nrows = node.options
options = (scan_fn, with_columns, source_type, nrows)
predicate = (
translate_named_expr(translator, n=predicate, schema=schema)
if predicate is not None
else None
)
return ir.PythonScan(schema, options, predicate)
@_translate_ir.register
def _(node: plrs._ir_nodes.Scan, translator: Translator, schema: Schema) -> ir.IR:
typ, *options = node.scan_type
paths = node.paths
    # Polars can produce a Scan with an empty ``node.paths`` (e.g. the native
# Iceberg reader on a table with no data files yet). In this case, polars returns an
# empty DataFrame with the declared schema. Mirror that here by
# replacing the Scan with an Empty IR node.
if not paths: # pragma: no cover
return ir.Empty(schema)
if typ == "ndjson":
(reader_options,) = map(json.loads, options)
cloud_options = None
else:
reader_options, cloud_options = map(json.loads, options)
file_options = node.file_options
with_columns = file_options.with_columns
row_index = file_options.row_index
include_file_paths = file_options.include_file_paths
if not POLARS_VERSION_LT_131:
deletion_files = file_options.deletion_files # pragma: no cover
if deletion_files: # pragma: no cover
raise NotImplementedError(
"Iceberg format is not supported in cudf-polars. Furthermore, row-level deletions are not supported."
) # pragma: no cover
config_options = translator.config_options
parquet_options = config_options.parquet_options
pre_slice = file_options.n_rows
if pre_slice is None:
n_rows = -1
skip_rows = 0
else:
skip_rows, n_rows = pre_slice
if (n_rows == 2**32 - 1) or (n_rows == 2**64 - 1):
# Polars translates slice(10, None) -> (10, u32/64max)
n_rows = -1
return ir.Scan(
schema,
typ,
reader_options,
cloud_options,
paths,
with_columns,
skip_rows,
n_rows,
row_index,
include_file_paths,
translate_named_expr(translator, n=node.predicate, schema=schema)
if node.predicate is not None
else None,
parquet_options,
)
@_translate_ir.register
def _(node: plrs._ir_nodes.Cache, translator: Translator, schema: Schema) -> ir.IR:
if POLARS_VERSION_LT_1323: # pragma: no cover
refcount = node.cache_hits
else:
refcount = None
# Make sure Cache nodes with the same id_
# are actually the same object.
if node.id_ not in translator._cache_nodes:
translator._cache_nodes[node.id_] = ir.Cache(
schema,
node.id_,
refcount,
translator.translate_ir(n=node.input),
)
return translator._cache_nodes[node.id_]
@_translate_ir.register
def _(
node: plrs._ir_nodes.DataFrameScan, translator: Translator, schema: Schema
) -> ir.IR:
return ir.DataFrameScan(
schema,
node.df,
node.projection,
)
@_translate_ir.register
def _(node: plrs._ir_nodes.Select, translator: Translator, schema: Schema) -> ir.IR:
with set_node(translator.visitor, node.input):
inp = translator.translate_ir(n=None)
exprs = [
translate_named_expr(translator, n=e, schema=inp.schema) for e in node.expr
]
return ir.Select(schema, exprs, node.should_broadcast, inp)
@_translate_ir.register
def _(node: plrs._ir_nodes.GroupBy, translator: Translator, schema: Schema) -> ir.IR:
with set_node(translator.visitor, node.input):
inp = translator.translate_ir(n=None)
keys = [
translate_named_expr(translator, n=e, schema=inp.schema) for e in node.keys
]
with set_expr_context(translator, ExecutionContext.GROUPBY):
original_aggs = [
translate_named_expr(translator, n=e, schema=inp.schema)
for e in node.aggs
]
is_rolling = node.options.rolling is not None
is_dynamic = node.options.dynamic is not None
if is_dynamic:
raise NotImplementedError("group_by_dynamic")
elif is_rolling:
return rewrite_rolling(
node.options, schema, keys, original_aggs, translator.config_options, inp
)
else:
return rewrite_groupby(node, schema, keys, original_aggs, inp)
_DECIMAL_TYPES = {plc.TypeId.DECIMAL32, plc.TypeId.DECIMAL64, plc.TypeId.DECIMAL128}
def _align_decimal_scales(
left: expr.Expr, right: expr.Expr
) -> tuple[expr.Expr, expr.Expr]:
left_type, right_type = left.dtype, right.dtype
if plc.traits.is_fixed_point(left_type.plc_type) and plc.traits.is_fixed_point(
right_type.plc_type
):
target = DataType.common_decimal_dtype(left_type, right_type)
if (
left_type.id() != target.id() or left_type.scale() != target.scale()
): # pragma: no cover; no test yet
left = expr.Cast(target, left)
if (
right_type.id() != target.id() or right_type.scale() != target.scale()
): # pragma: no cover; no test yet
right = expr.Cast(target, right)
return left, right
@_translate_ir.register
def _(node: plrs._ir_nodes.Join, translator: Translator, schema: Schema) -> ir.IR:
# Join key dtypes are dependent on the schema of the left and
# right inputs, so these must be translated with the relevant
# input active.
with set_node(translator.visitor, node.input_left):
inp_left = translator.translate_ir(n=None)
left_on = [
translate_named_expr(translator, n=e, schema=inp_left.schema)
for e in node.left_on
]
with set_node(translator.visitor, node.input_right):
inp_right = translator.translate_ir(n=None)
right_on = [
translate_named_expr(translator, n=e, schema=inp_right.schema)
for e in node.right_on
]
if (how := node.options[0]) in {
"Inner",
"Left",
"Right",
"Full",
"Cross",
"Semi",
"Anti",
}:
return ir.Join(
schema,
left_on,
right_on,
node.options,
inp_left,
inp_right,
)
else:
how, op1, op2 = node.options[0]
if how != "IEJoin":
raise NotImplementedError(
f"Unsupported join type {how}"
) # pragma: no cover; asof joins not yet exposed
if op2 is None:
ops = [op1]
else:
ops = [op1, op2]
dtype = DataType(pl.datatypes.Boolean())
predicate = functools.reduce(
functools.partial(
expr.BinOp, dtype, plc.binaryop.BinaryOperator.LOGICAL_AND
),
(
expr.BinOp(
dtype,
expr.BinOp._MAPPING[op],
*_align_decimal_scales(
insert_colrefs(
left_ne.value,
table_ref=plc.expressions.TableReference.LEFT,
name_to_index={
name: i for i, name in enumerate(inp_left.schema)
},
),
insert_colrefs(
right_ne.value,
table_ref=plc.expressions.TableReference.RIGHT,
name_to_index={
name: i for i, name in enumerate(inp_right.schema)
},
),
),
)
for op, left_ne, right_ne in zip(ops, left_on, right_on, strict=True)
),
)
return ir.ConditionalJoin(schema, predicate, node.options, inp_left, inp_right)
@_translate_ir.register
def _(node: plrs._ir_nodes.HStack, translator: Translator, schema: Schema) -> ir.IR:
with set_node(translator.visitor, node.input):
inp = translator.translate_ir(n=None)
exprs = [
translate_named_expr(translator, n=e, schema=inp.schema) for e in node.exprs
]
return ir.HStack(schema, exprs, node.should_broadcast, inp)
@_translate_ir.register
def _(
node: plrs._ir_nodes.Reduce, translator: Translator, schema: Schema
) -> ir.IR: # pragma: no cover; polars doesn't emit this node yet
with set_node(translator.visitor, node.input):
inp = translator.translate_ir(n=None)
exprs = [
translate_named_expr(translator, n=e, schema=inp.schema) for e in node.expr
]
return ir.Reduce(schema, exprs, inp)
@_translate_ir.register
def _(node: plrs._ir_nodes.Distinct, translator: Translator, schema: Schema) -> ir.IR:
(keep, subset, maintain_order, zlice) = node.options
keep = ir.Distinct._KEEP_MAP[keep]
subset = frozenset(subset) if subset is not None else None
return ir.Distinct(
schema,
keep,
subset,
zlice,
maintain_order,
translator.translate_ir(n=node.input),
)
@_translate_ir.register
def _(node: plrs._ir_nodes.Sort, translator: Translator, schema: Schema) -> ir.IR:
with set_node(translator.visitor, node.input):
inp = translator.translate_ir(n=None)
by = [
translate_named_expr(translator, n=e, schema=inp.schema)
for e in node.by_column
]
stable, nulls_last, descending = node.sort_options
order, null_order = sorting.sort_order(
descending, nulls_last=nulls_last, num_keys=len(by)
)
return ir.Sort(schema, by, order, null_order, stable, node.slice, inp)
@_translate_ir.register
def _(node: plrs._ir_nodes.Slice, translator: Translator, schema: Schema) -> ir.IR:
return ir.Slice(
schema, node.offset, node.len, translator.translate_ir(n=node.input)
)
@_translate_ir.register
def _(node: plrs._ir_nodes.Filter, translator: Translator, schema: Schema) -> ir.IR:
with set_node(translator.visitor, node.input):
inp = translator.translate_ir(n=None)
mask = translate_named_expr(translator, n=node.predicate, schema=inp.schema)
return ir.Filter(schema, mask, inp)
@_translate_ir.register
def _(
node: plrs._ir_nodes.SimpleProjection, translator: Translator, schema: Schema
) -> ir.IR:
return ir.Projection(schema, translator.translate_ir(n=node.input))
@_translate_ir.register
def _(
node: plrs._ir_nodes.MergeSorted, translator: Translator, schema: Schema
) -> ir.IR:
key = node.key
inp_left = translator.translate_ir(n=node.input_left)
inp_right = translator.translate_ir(n=node.input_right)
return ir.MergeSorted(
schema,
key,
inp_left,
inp_right,
)
@_translate_ir.register
def _(
node: plrs._ir_nodes.MapFunction, translator: Translator, schema: Schema
) -> ir.IR:
name, *options = node.function
return ir.MapFunction(
schema,
name,
options,
translator.translate_ir(n=node.input),
)
@_translate_ir.register
def _(node: plrs._ir_nodes.Union, translator: Translator, schema: Schema) -> ir.IR:
return ir.Union(
schema, node.options, *(translator.translate_ir(n=n) for n in node.inputs)
)
@_translate_ir.register
def _(node: plrs._ir_nodes.HConcat, translator: Translator, schema: Schema) -> ir.IR:
return ir.HConcat(
schema,
False, # noqa: FBT003
*(translator.translate_ir(n=n) for n in node.inputs),
)
@_translate_ir.register
def _(node: plrs._ir_nodes.Sink, translator: Translator, schema: Schema) -> ir.IR:
payload = json.loads(node.payload)
try:
file = payload["File"]
sink_kind_options = file["file_type"]
except KeyError as err: # pragma: no cover
raise NotImplementedError("Unsupported payload structure") from err
if isinstance(sink_kind_options, dict):
if len(sink_kind_options) != 1: # pragma: no cover; not sure if this can happen
raise NotImplementedError("Sink options dict with more than one entry.")
sink_kind, options = next(iter(sink_kind_options.items()))
else:
raise NotImplementedError(
"Unsupported sink options structure"
) # pragma: no cover
sink_options = file.get("sink_options", {})
cloud_options = file.get("cloud_options")
options.update(sink_options)
return ir.Sink(
schema=schema,
kind=sink_kind,
path=file["target"] if POLARS_VERSION_LT_132 else file["target"]["Local"],
parquet_options=translator.config_options.parquet_options,
options=options,
cloud_options=cloud_options,
df=translator.translate_ir(n=node.input),
)
def translate_named_expr(
translator: Translator, *, n: plrs._expr_nodes.PyExprIR, schema: Schema
) -> expr.NamedExpr:
"""
Translate a polars-internal named expression IR object into our representation.
Parameters
----------
translator
Translator object
n
Node to translate, a named expression node.
schema
Schema of the IR node this expression uses as evaluation context.
Returns
-------
Translated IR object.
Notes
-----
The datatype of the internal expression will be obtained from the
visitor by calling ``get_dtype``, for this to work properly, the
caller should arrange that the expression is translated with the
node that it references "active" for the visitor (see :class:`set_node`).
Raises
------
NotImplementedError
If any translation fails due to unsupported functionality.
"""
return expr.NamedExpr(
n.output_name, translator.translate_expr(n=n.node, schema=schema)
)
@singledispatch
def _translate_expr(
node: Any, translator: Translator, dtype: DataType, schema: Schema
) -> expr.Expr:
raise NotImplementedError(
f"Translation for {type(node).__name__}"
) # pragma: no cover
@_translate_expr.register
def _(
node: plrs._expr_nodes.Function,
translator: Translator,
dtype: DataType,
schema: Schema,
) -> expr.Expr:
name, *options = node.function_data
options = tuple(options)
if isinstance(name, plrs._expr_nodes.StringFunction):
if name in {
plrs._expr_nodes.StringFunction.StripChars,
plrs._expr_nodes.StringFunction.StripCharsStart,
plrs._expr_nodes.StringFunction.StripCharsEnd,
}:
column, chars = (
translator.translate_expr(n=n, schema=schema) for n in node.input
)
if isinstance(chars, expr.Literal):
# We check for null first because we want to use the
# chars type, but it is invalid to try and
# produce a string scalar with a null dtype.
if chars.value is None:
# Polars uses None to mean "strip all whitespace"
chars = expr.Literal(column.dtype, "")
elif chars.value == "":
# No-op in polars, but libcudf uses empty string
# as signifier to remove whitespace.
return column
return expr.StringFunction(
dtype,
expr.StringFunction.Name.from_polars(name),
options,
column,
chars,
)
return expr.StringFunction(
dtype,
expr.StringFunction.Name.from_polars(name),
options,
*(translator.translate_expr(n=n, schema=schema) for n in node.input),
)
elif isinstance(name, plrs._expr_nodes.BooleanFunction):
if name == plrs._expr_nodes.BooleanFunction.IsBetween:
column, lo, hi = (
translator.translate_expr(n=n, schema=schema) for n in node.input
)
(closed,) = options
lop, rop = expr.BooleanFunction._BETWEEN_OPS[closed]
return expr.BinOp(
dtype,
plc.binaryop.BinaryOperator.LOGICAL_AND,
expr.BinOp(dtype, lop, column, lo),
expr.BinOp(dtype, rop, column, hi),
)
return expr.BooleanFunction(
dtype,
expr.BooleanFunction.Name.from_polars(name),
options,
*(translator.translate_expr(n=n, schema=schema) for n in node.input),
)
elif isinstance(name, plrs._expr_nodes.TemporalFunction):
# functions for which evaluation of the expression may not return
# the same dtype as polars, either due to libcudf returning a different
# dtype, or due to our internal processing affecting what libcudf returns
needs_cast = {
plrs._expr_nodes.TemporalFunction.Year,
plrs._expr_nodes.TemporalFunction.Month,
plrs._expr_nodes.TemporalFunction.Day,
plrs._expr_nodes.TemporalFunction.WeekDay,
plrs._expr_nodes.TemporalFunction.Hour,
plrs._expr_nodes.TemporalFunction.Minute,
plrs._expr_nodes.TemporalFunction.Second,
plrs._expr_nodes.TemporalFunction.Millisecond,
}
result_expr = expr.TemporalFunction(
dtype,
expr.TemporalFunction.Name.from_polars(name),
options,
*(translator.translate_expr(n=n, schema=schema) for n in node.input),
)
if name in needs_cast:
return expr.Cast(dtype, result_expr)
return result_expr
elif not POLARS_VERSION_LT_131 and isinstance(
name, plrs._expr_nodes.StructFunction
):
return expr.StructFunction(
dtype,
expr.StructFunction.Name.from_polars(name),
options,
*(translator.translate_expr(n=n, schema=schema) for n in node.input),
)
elif isinstance(name, str):
children = (translator.translate_expr(n=n, schema=schema) for n in node.input)
if name == "log" or (
not POLARS_VERSION_LT_133
and name == "l"
and isinstance(options[0], str)
and "".join((name, *options)) == "log"
):
if POLARS_VERSION_LT_133: # pragma: no cover
(base,) = options
(child,) = children
return expr.BinOp(
dtype,
plc.binaryop.BinaryOperator.LOG_BASE,
child,
expr.Literal(dtype, base),
)
else:
(child, base) = children
res = expr.BinOp(
dtype,
plc.binaryop.BinaryOperator.LOG_BASE,
child,
expr.Literal(dtype, base.value),
)
return (
res
if not POLARS_VERSION_LT_134
else expr.Cast(
DataType(pl.Float64()),
res,
)
)
elif name == "pow":
return expr.BinOp(dtype, plc.binaryop.BinaryOperator.POW, *children)
return expr.UnaryFunction(dtype, name, options, *children)
raise NotImplementedError(
f"No handler for Expr function node with {name=}"
) # pragma: no cover; polars raises on the rust side for now
@_translate_expr.register
def _(
node: plrs._expr_nodes.Window,
translator: Translator,
dtype: DataType,
schema: Schema,
) -> expr.Expr:
if isinstance(node.options, plrs._expr_nodes.RollingGroupOptions):
# pl.col("a").rolling(...)
with set_expr_context(translator, ExecutionContext.ROLLING):
agg = translator.translate_expr(n=node.function, schema=schema)
name_generator = unique_names(schema)
aggs, named_post_agg = decompose_single_agg(
expr.NamedExpr(next(name_generator), agg),
name_generator,
is_top=True,
context=ExecutionContext.ROLLING,
)
named_aggs = [agg for agg, _ in aggs]
orderby = node.options.index_column
orderby_dtype = schema[orderby].plc_type
if plc.traits.is_integral(orderby_dtype):
# Integer orderby column is cast in implementation to int64 in polars
orderby_dtype = plc.DataType(plc.TypeId.INT64)
closed_window = node.options.closed_window
if isinstance(named_post_agg.value, expr.Col):
(named_agg,) = named_aggs
return expr.RollingWindow(
named_agg.value.dtype,
orderby_dtype,
node.options.offset,
node.options.period,
closed_window,
orderby,
named_agg.value,
)
replacements: dict[expr.Expr, expr.Expr] = {
expr.Col(agg.value.dtype, agg.name): expr.RollingWindow(
agg.value.dtype,
orderby_dtype,
node.options.offset,
node.options.period,
closed_window,
orderby,
agg.value,
)
for agg in named_aggs
}
return replace([named_post_agg.value], replacements)[0]
elif isinstance(node.options, plrs._expr_nodes.WindowMapping):
# pl.col("a").over(...)
with set_expr_context(translator, ExecutionContext.WINDOW):
agg = translator.translate_expr(n=node.function, schema=schema)
name_gen = unique_names(schema)
aggs, post = decompose_single_agg(
expr.NamedExpr(next(name_gen), agg),
name_gen,
is_top=True,
context=ExecutionContext.WINDOW,
)
mapping = node.options.kind
has_order_by = node.order_by is not None
descending = bool(getattr(node, "order_by_descending", False))
nulls_last = bool(getattr(node, "order_by_nulls_last", False))
if mapping != "groups_to_rows":
raise NotImplementedError(
f"over(mapping_strategy) not supported yet: {mapping=}; "
f"expected 'groups_to_rows'"
)
order_by_expr = (
translator.translate_expr(n=node.order_by, schema=schema)
if has_order_by
else None
)
named_aggs = [agg for agg, _ in aggs]
by_exprs = [
translator.translate_expr(n=n, schema=schema) for n in node.partition_by
]
child_deps = [
v.children[0]
for ne in named_aggs
for v in (ne.value,)
if isinstance(v, expr.Agg)
or (
isinstance(v, expr.UnaryFunction)
and v.name in {"rank", "fill_null_with_strategy", "cum_sum"}
)
]
children = (*by_exprs, *((order_by_expr,) if has_order_by else ()), *child_deps)
return expr.GroupedRollingWindow(
dtype,
(mapping, has_order_by, descending, nulls_last),
named_aggs,
post,
len(by_exprs),
*children,
)
assert_never(node.options)
@_translate_expr.register
def _(
node: plrs._expr_nodes.Literal,
translator: Translator,
dtype: DataType,
schema: Schema,
) -> expr.Expr:
if isinstance(node.value, plrs.PySeries):
return expr.LiteralColumn(dtype, pl.Series._from_pyseries(node.value))
if dtype.id() == plc.TypeId.LIST: # pragma: no cover
# TODO: Remove once pylibcudf.Scalar supports lists
return expr.LiteralColumn(dtype, pl.Series(node.value))
return expr.Literal(dtype, node.value)
@_translate_expr.register
def _(
node: plrs._expr_nodes.Sort, translator: Translator, dtype: DataType, schema: Schema
) -> expr.Expr:
# TODO: raise in groupby
return expr.Sort(
dtype, node.options, translator.translate_expr(n=node.expr, schema=schema)
)
@_translate_expr.register
def _(
node: plrs._expr_nodes.SortBy,
translator: Translator,
dtype: DataType,
schema: Schema,
) -> expr.Expr:
options = node.sort_options
return expr.SortBy(
dtype,
(options[0], tuple(options[1]), tuple(options[2])),
translator.translate_expr(n=node.expr, schema=schema),
*(translator.translate_expr(n=n, schema=schema) for n in node.by),
)
@_translate_expr.register
def _(
node: plrs._expr_nodes.Slice,
translator: Translator,
dtype: DataType,
schema: Schema,
) -> expr.Expr:
offset = translator.translate_expr(n=node.offset, schema=schema)
length = translator.translate_expr(n=node.length, schema=schema)
assert isinstance(offset, expr.Literal)
assert isinstance(length, expr.Literal)
return expr.Slice(
dtype,
offset.value,
length.value,
translator.translate_expr(n=node.input, schema=schema),
)
@_translate_expr.register
def _(
node: plrs._expr_nodes.Gather,
translator: Translator,
dtype: DataType,
schema: Schema,
) -> expr.Expr:
return expr.Gather(
dtype,
translator.translate_expr(n=node.expr, schema=schema),
translator.translate_expr(n=node.idx, schema=schema),
)
@_translate_expr.register
def _(
node: plrs._expr_nodes.Filter,
translator: Translator,
dtype: DataType,
schema: Schema,
) -> expr.Expr:
return expr.Filter(
dtype,
translator.translate_expr(n=node.input, schema=schema),
translator.translate_expr(n=node.by, schema=schema),
)
@_translate_expr.register
def _(
node: plrs._expr_nodes.Cast, translator: Translator, dtype: DataType, schema: Schema
) -> expr.Expr:
inner = translator.translate_expr(n=node.expr, schema=schema)
if plc.traits.is_floating_point(inner.dtype.plc_type) and plc.traits.is_fixed_point(
dtype.plc_type
):
return expr.Cast(
dtype,
expr.UnaryFunction(
inner.dtype, "round", (-dtype.plc_type.scale(), "half_to_even"), inner
),
)
# Push casts into literals so we can handle Cast(Literal(Null))
if isinstance(inner, expr.Literal):
return inner.astype(dtype)
elif isinstance(inner, expr.Cast):
# Translation of Len/Count-agg put in a cast, remove double
# casts if we have one.
(inner,) = inner.children
return expr.Cast(dtype, inner)
@_translate_expr.register
def _(
node: plrs._expr_nodes.Column,
translator: Translator,
dtype: DataType,
schema: Schema,
) -> expr.Expr:
return expr.Col(dtype, node.name)
@_translate_expr.register
def _(
node: plrs._expr_nodes.Agg, translator: Translator, dtype: DataType, schema: Schema
) -> expr.Expr:
agg_name = node.name
args = [translator.translate_expr(n=arg, schema=schema) for arg in node.arguments]
if agg_name not in ("count", "n_unique", "mean", "median", "quantile"):
args = [
expr.Cast(dtype, arg)
if plc.traits.is_fixed_point(arg.dtype.plc_type)
and arg.dtype.plc_type != dtype.plc_type
else arg
for arg in args
]
value = expr.Agg(dtype, agg_name, node.options, translator._expr_context, *args)
if agg_name in ("count", "n_unique") and value.dtype.id() != plc.TypeId.INT32:
return expr.Cast(value.dtype, value)
return value
@_translate_expr.register
def _(
node: plrs._expr_nodes.Ternary,
translator: Translator,
dtype: DataType,
schema: Schema,
) -> expr.Expr:
return expr.Ternary(
dtype,
translator.translate_expr(n=node.predicate, schema=schema),
translator.translate_expr(n=node.truthy, schema=schema),
translator.translate_expr(n=node.falsy, schema=schema),
)
@_translate_expr.register
def _(
node: plrs._expr_nodes.BinaryExpr,
translator: Translator,
dtype: DataType,
schema: Schema,
) -> expr.Expr:
left = translator.translate_expr(n=node.left, schema=schema)
right = translator.translate_expr(n=node.right, schema=schema)
if (
POLARS_VERSION_LT_133
and plc.traits.is_boolean(dtype.plc_type)
and node.op == plrs._expr_nodes.Operator.TrueDivide
):
dtype = DataType(pl.Float64()) # pragma: no cover
if node.op == plrs._expr_nodes.Operator.TrueDivide and (
plc.traits.is_fixed_point(left.dtype.plc_type)
or plc.traits.is_fixed_point(right.dtype.plc_type)
):
f64 = DataType(pl.Float64())
return expr.Cast(
dtype,
expr.BinOp(
f64,
expr.BinOp._MAPPING[node.op],
expr.Cast(f64, left),
expr.Cast(f64, right),
),
)
if (
not POLARS_VERSION_LT_134
and node.op == plrs._expr_nodes.Operator.Multiply
and plc.traits.is_fixed_point(left.dtype.plc_type)
and plc.traits.is_fixed_point(right.dtype.plc_type)
):
left_scale = -left.dtype.plc_type.scale()
right_scale = -right.dtype.plc_type.scale()
out_scale = max(left_scale, right_scale)
return expr.UnaryFunction(
DataType(pl.Decimal(38, out_scale)),
"round",
(out_scale, "half_to_even"),
expr.BinOp(
DataType(pl.Decimal(38, left_scale + right_scale)),
expr.BinOp._MAPPING[node.op],
left,
right,
),
)
return expr.BinOp(
dtype,
expr.BinOp._MAPPING[node.op],
left,
right,
)
@_translate_expr.register
def _(
node: plrs._expr_nodes.Len, translator: Translator, dtype: DataType, schema: Schema
) -> expr.Expr:
value = expr.Len(dtype)
if dtype.id() != plc.TypeId.INT32:
return expr.Cast(dtype, value)
return value # pragma: no cover; never reached since polars len has uint32 dtype
|
set_expr_context
|
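The `_translate_ir`/`_translate_expr` functions above use the `functools.singledispatch` registration pattern; a minimal standalone sketch of the same idiom (the node classes are hypothetical stand-ins):

from functools import singledispatch
class ScanNode: ...
class FilterNode: ...
@singledispatch
def translate(node):
    raise NotImplementedError(f"Translation for {type(node).__name__}")
@translate.register
def _(node: ScanNode):
    return "scan-ir"
@translate.register
def _(node: FilterNode):
    return "filter-ir"
assert translate(ScanNode()) == "scan-ir"
assert translate(FilterNode()) == "filter-ir"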
python
|
numba__numba
|
numba/core/cgutils.py
|
{
"start": 7039,
"end": 7419
}
|
class ____(_StructProxy):
"""
Create a StructProxy suitable for accessing regular values
(e.g. LLVM values or alloca slots).
"""
def _get_be_type(self, datamodel):
return datamodel.get_value_type()
def _cast_member_to_value(self, index, val):
return val
def _cast_member_from_value(self, index, val):
return val
|
ValueStructProxy
|
python
|
openai__openai-python
|
tests/api_resources/fine_tuning/jobs/test_checkpoints.py
|
{
"start": 2577,
"end": 4907
}
|
class ____:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@parametrize
async def test_method_list(self, async_client: AsyncOpenAI) -> None:
checkpoint = await async_client.fine_tuning.jobs.checkpoints.list(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert_matches_type(AsyncCursorPage[FineTuningJobCheckpoint], checkpoint, path=["response"])
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
checkpoint = await async_client.fine_tuning.jobs.checkpoints.list(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
after="string",
limit=0,
)
assert_matches_type(AsyncCursorPage[FineTuningJobCheckpoint], checkpoint, path=["response"])
@parametrize
async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
response = await async_client.fine_tuning.jobs.checkpoints.with_raw_response.list(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
checkpoint = response.parse()
assert_matches_type(AsyncCursorPage[FineTuningJobCheckpoint], checkpoint, path=["response"])
@parametrize
async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
async with async_client.fine_tuning.jobs.checkpoints.with_streaming_response.list(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
checkpoint = await response.parse()
assert_matches_type(AsyncCursorPage[FineTuningJobCheckpoint], checkpoint, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
await async_client.fine_tuning.jobs.checkpoints.with_raw_response.list(
"",
)
|
TestAsyncCheckpoints
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/optimizer_v1.py
|
{
"start": 12940,
"end": 16411
}
|
class ____(Optimizer):
"""Adadelta optimizer.
Adadelta is a more robust extension of Adagrad
that adapts learning rates based on a moving window of gradient updates,
instead of accumulating all past gradients. This way, Adadelta continues
learning even when many updates have been done. Compared to Adagrad, in the
original version of Adadelta you don't have to set an initial learning
rate. In this version, the initial learning rate and decay factor can
be set, as in most other Keras optimizers.
It is recommended to leave the parameters of this optimizer
at their default values.
Arguments:
lr: float >= 0. Initial learning rate, defaults to 1.
It is recommended to leave it at the default value.
rho: float >= 0. Adadelta decay factor, corresponding to fraction of
gradient to keep at each time step.
epsilon: float >= 0. Fuzz factor.
If `None`, defaults to `backend.epsilon()`.
decay: float >= 0. Initial learning rate decay.
References:
- [Adadelta - an adaptive learning rate
method](http://arxiv.org/abs/1212.5701)
"""
def __init__(self, lr=1.0, rho=0.95, epsilon=None, decay=0., **kwargs):
super(Adadelta, self).__init__(**kwargs)
with backend.name_scope(self.__class__.__name__):
self.lr = backend.variable(lr, name='lr')
self.decay = backend.variable(decay, name='decay')
self.iterations = backend.variable(0, dtype='int64', name='iterations')
if epsilon is None:
epsilon = backend.epsilon()
self.rho = rho
self.epsilon = epsilon
self.initial_decay = decay
def _create_all_weights(self, params):
shapes = [backend.int_shape(p) for p in params]
accumulators = [backend.zeros(shape) for shape in shapes]
delta_accumulators = [backend.zeros(shape) for shape in shapes]
self.weights = accumulators + delta_accumulators
return accumulators, delta_accumulators
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [state_ops.assign_add(self.iterations, 1)]
accumulators, delta_accumulators = self._create_all_weights(params)
lr = self.lr
if self.initial_decay > 0:
lr = lr * (
1. /
(1. +
self.decay * math_ops.cast(self.iterations,
backend.dtype(self.decay))))
for p, g, a, d_a in zip(params, grads, accumulators, delta_accumulators):
# update accumulator
new_a = self.rho * a + (1. - self.rho) * math_ops.square(g)
self.updates.append(state_ops.assign(a, new_a))
# use the new accumulator and the *old* delta_accumulator
update = g * backend.sqrt(d_a + self.epsilon) / backend.sqrt(
new_a + self.epsilon)
new_p = p - lr * update
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
# update delta_accumulator
new_d_a = self.rho * d_a + (1 - self.rho) * math_ops.square(update)
self.updates.append(state_ops.assign(d_a, new_d_a))
return self.updates
def get_config(self):
config = {
'lr': float(backend.get_value(self.lr)),
'rho': self.rho,
'decay': float(backend.get_value(self.decay)),
'epsilon': self.epsilon
}
base_config = super(Adadelta, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|
Adadelta
|
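To make the update rule in get_updates() above concrete, here is a minimal NumPy sketch of a single Adadelta step (standalone; adadelta_step is an illustrative helper, not part of Keras or TensorFlow):

import numpy as np

def adadelta_step(p, g, acc, d_acc, lr=1.0, rho=0.95, eps=1e-7):
    # decaying average of squared gradients
    acc = rho * acc + (1.0 - rho) * g ** 2
    # scale the gradient by RMS(previous updates) / RMS(gradients)
    update = g * np.sqrt(d_acc + eps) / np.sqrt(acc + eps)
    p = p - lr * update
    # decaying average of squared updates
    d_acc = rho * d_acc + (1.0 - rho) * update ** 2
    return p, acc, d_acc

p = np.ones(3)
acc, d_acc = np.zeros(3), np.zeros(3)
p, acc, d_acc = adadelta_step(p, np.array([0.1, -0.2, 0.3]), acc, d_acc)
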
python
|
sympy__sympy
|
sympy/solvers/diophantine/diophantine.py
|
{
"start": 13785,
"end": 21970
}
|
class ____(DiophantineEquationType):
"""
Representation of a binary quadratic diophantine equation.
A binary quadratic diophantine equation is an equation of the
form `Ax^2 + Bxy + Cy^2 + Dx + Ey + F = 0`, where `A, B, C, D, E,
F` are integer constants and `x` and `y` are integer variables.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.solvers.diophantine.diophantine import BinaryQuadratic
>>> b1 = BinaryQuadratic(x**3 + y**2 + 1)
>>> b1.matches()
False
>>> b2 = BinaryQuadratic(x**2 + y**2 + 2*x + 2*y + 2)
>>> b2.matches()
True
>>> b2.solve()
{(-1, -1)}
References
==========
.. [1] Methods to solve Ax^2 + Bxy + Cy^2 + Dx + Ey + F = 0, [online],
Available: https://www.alpertron.com.ar/METHODS.HTM
.. [2] Solving the equation ax^2+ bxy + cy^2 + dx + ey + f= 0, [online],
Available: https://web.archive.org/web/20160323033111/http://www.jpr2718.org/ax2p.pdf
"""
name = 'binary_quadratic'
def matches(self):
return self.total_degree == 2 and self.dimension == 2
def solve(self, parameters=None, limit=None) -> DiophantineSolutionSet:
self.pre_solve(parameters)
var = self.free_symbols
coeff = self.coeff
x, y = var
A = coeff[x**2]
B = coeff[x*y]
C = coeff[y**2]
D = coeff[x]
E = coeff[y]
F = coeff[S.One]
A, B, C, D, E, F = [as_int(i) for i in _remove_gcd(A, B, C, D, E, F)]
# (1) Simple-Hyperbolic case: A = C = 0, B != 0
# In this case equation can be converted to (Bx + E)(By + D) = DE - BF
# We consider two cases; DE - BF = 0 and DE - BF != 0
# More details, https://www.alpertron.com.ar/METHODS.HTM#SHyperb
result = DiophantineSolutionSet(var, self.parameters)
t, u = result.parameters
discr = B**2 - 4*A*C
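        # discr drives the case split below: A == C == 0 with B != 0 is the
        # simple-hyperbolic case, discr == 0 is parabolic, a nonzero square
        # discr factors over the integers, and everything else goes through
        # the Pell-equation machinery (diop_DN).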
if A == 0 and C == 0 and B != 0:
if D*E - B*F == 0:
q, r = divmod(E, B)
if not r:
result.add((-q, t))
q, r = divmod(D, B)
if not r:
result.add((t, -q))
else:
div = divisors(D*E - B*F)
div = div + [-term for term in div]
for d in div:
x0, r = divmod(d - E, B)
if not r:
q, r = divmod(D*E - B*F, d)
if not r:
y0, r = divmod(q - D, B)
if not r:
result.add((x0, y0))
# (2) Parabolic case: B**2 - 4*A*C = 0
# There are two subcases to be considered in this case.
# sqrt(c)D - sqrt(a)E = 0 and sqrt(c)D - sqrt(a)E != 0
# More Details, https://www.alpertron.com.ar/METHODS.HTM#Parabol
elif discr == 0:
if A == 0:
s = BinaryQuadratic(self.equation, free_symbols=[y, x]).solve(parameters=[t, u])
for soln in s:
result.add((soln[1], soln[0]))
else:
g = sign(A)*igcd(A, C)
a = A // g
c = C // g
e = sign(B / A)
sqa = isqrt(a)
sqc = isqrt(c)
_c = e*sqc*D - sqa*E
if not _c:
z = Symbol("z", real=True)
eq = sqa*g*z**2 + D*z + sqa*F
roots = solveset_real(eq, z).intersect(S.Integers)
for root in roots:
ans = diop_solve(sqa*x + e*sqc*y - root)
result.add((ans[0], ans[1]))
elif int_valued(c):
solve_x = lambda u: -e*sqc*g*_c*t**2 - (E + 2*e*sqc*g*u)*t \
- (e*sqc*g*u**2 + E*u + e*sqc*F) // _c
solve_y = lambda u: sqa*g*_c*t**2 + (D + 2*sqa*g*u)*t \
+ (sqa*g*u**2 + D*u + sqa*F) // _c
for z0 in range(0, abs(_c)):
# Check if the coefficients of y and x obtained are integers or not
if (divisible(sqa*g*z0**2 + D*z0 + sqa*F, _c) and
divisible(e*sqc*g*z0**2 + E*z0 + e*sqc*F, _c)):
result.add((solve_x(z0), solve_y(z0)))
# (3) Method used when B**2 - 4*A*C is a square, is described in p. 6 of the below paper
# by John P. Robertson.
# https://web.archive.org/web/20160323033111/http://www.jpr2718.org/ax2p.pdf
elif is_square(discr):
if A != 0:
r = sqrt(discr)
u, v = symbols("u, v", integer=True)
eq = _mexpand(
4*A*r*u*v + 4*A*D*(B*v + r*u + r*v - B*u) +
2*A*4*A*E*(u - v) + 4*A*r*4*A*F)
solution = diop_solve(eq, t)
for s0, t0 in solution:
num = B*t0 + r*s0 + r*t0 - B*s0
x_0 = S(num) / (4*A*r)
y_0 = S(s0 - t0) / (2*r)
if isinstance(s0, Symbol) or isinstance(t0, Symbol):
if len(check_param(x_0, y_0, 4*A*r, parameters)) > 0:
ans = check_param(x_0, y_0, 4*A*r, parameters)
result.update(*ans)
elif x_0.is_Integer and y_0.is_Integer:
if is_solution_quad(var, coeff, x_0, y_0):
result.add((x_0, y_0))
else:
s = BinaryQuadratic(self.equation, free_symbols=var[::-1]).solve(parameters=[t, u]) # Interchange x and y
while s:
result.add(s.pop()[::-1]) # and solution <--------+
# (4) B**2 - 4*A*C > 0 and B**2 - 4*A*C not a square or B**2 - 4*A*C < 0
else:
P, Q = _transformation_to_DN(var, coeff)
D, N = _find_DN(var, coeff)
solns_pell = diop_DN(D, N)
if D < 0:
for x0, y0 in solns_pell:
for x in [-x0, x0]:
for y in [-y0, y0]:
s = P*Matrix([x, y]) + Q
try:
result.add([as_int(_) for _ in s])
except ValueError:
pass
else:
# In this case equation can be transformed into a Pell equation
solns_pell = set(solns_pell)
solns_pell.update((-X, -Y) for X, Y in list(solns_pell))
a = diop_DN(D, 1)
T = a[0][0]
U = a[0][1]
if all(int_valued(_) for _ in P[:4] + Q[:2]):
for r, s in solns_pell:
_a = (r + s*sqrt(D))*(T + U*sqrt(D))**t
_b = (r - s*sqrt(D))*(T - U*sqrt(D))**t
x_n = _mexpand(S(_a + _b) / 2)
y_n = _mexpand(S(_a - _b) / (2*sqrt(D)))
s = P*Matrix([x_n, y_n]) + Q
result.add(s)
else:
L = ilcm(*[_.q for _ in P[:4] + Q[:2]])
k = 1
T_k = T
U_k = U
while (T_k - 1) % L != 0 or U_k % L != 0:
T_k, U_k = T_k*T + D*U_k*U, T_k*U + U_k*T
k += 1
for X, Y in solns_pell:
for i in range(k):
if all(int_valued(_) for _ in P*Matrix([X, Y]) + Q):
_a = (X + sqrt(D)*Y)*(T_k + sqrt(D)*U_k)**t
_b = (X - sqrt(D)*Y)*(T_k - sqrt(D)*U_k)**t
Xt = S(_a + _b) / 2
Yt = S(_a - _b) / (2*sqrt(D))
s = P*Matrix([Xt, Yt]) + Q
result.add(s)
X, Y = X*T + D*U*Y, X*U + Y*T
return result
|
BinaryQuadratic
|
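A minimal usage sketch of the simple-hyperbolic branch handled first in solve() (A = C = 0, B != 0; the equation is chosen so that D*E - B*F = 266, and solutions come from the divisors of 266):

from sympy.abc import x, y
from sympy.solvers.diophantine.diophantine import BinaryQuadratic

eq = BinaryQuadratic(2*x*y + 5*x + 56*y + 7)
assert eq.matches()
solutions = eq.solve()
# (-27, 64) is one solution: 2*(-27)*64 + 5*(-27) + 56*64 + 7 == 0
print(solutions)
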
python
|
getsentry__sentry
|
src/sentry/issues/ingest.py
|
{
"start": 4256,
"end": 5513
}
|
class ____(TypedDict):
platform: str | None
message: str
level: int | None
culprit: str | None
last_seen: datetime
first_seen: datetime
active_at: datetime
type: int
data: OccurrenceMetadata
first_release: Release | None
priority: int | None
@sentry_sdk.tracing.trace
def _create_issue_kwargs(
occurrence: IssueOccurrence, event: Event, release: Release | None
) -> IssueArgs:
priority = occurrence.priority or occurrence.type.default_priority
kwargs: IssueArgs = {
"platform": event.platform,
# TODO: Figure out what message should be. Or maybe we just implement a platform event and
# define it in `search_message` there.
"message": event.search_message,
"level": LOG_LEVELS_MAP.get(occurrence.level),
"culprit": truncatechars(occurrence.culprit, MAX_CULPRIT_LENGTH),
"last_seen": event.datetime,
"first_seen": event.datetime,
"active_at": event.datetime,
"type": occurrence.type.type_id,
"first_release": release,
"data": materialize_metadata(occurrence, event),
"priority": priority,
}
kwargs["data"]["last_received"] = json.datetime_to_str(event.datetime)
return kwargs
|
IssueArgs
|
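As a self-contained illustration of the TypedDict-plus-builder pattern used above (no Sentry imports; all names in this sketch are made up, and the field set is reduced for brevity):

from datetime import datetime, timezone
from typing import TypedDict

class IssueArgsSketch(TypedDict):
    platform: str | None
    message: str
    first_seen: datetime

def create_issue_kwargs_sketch(message: str, platform: str | None = None) -> IssueArgsSketch:
    # a type checker verifies the returned literal against the TypedDict schema
    return {
        "platform": platform,
        "message": message,
        "first_seen": datetime.now(timezone.utc),
    }

kwargs = create_issue_kwargs_sketch("example occurrence")
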
python
|
sqlalchemy__sqlalchemy
|
test/sql/test_defaults.py
|
{
"start": 1235,
"end": 4132
}
|
class ____(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def test_string(self):
        # note: the datatype being Integer here doesn't matter;
        # the server_default is interpreted independently of the
        # column's datatype.
m = MetaData()
t = Table("t", m, Column("x", Integer, server_default="5"))
self.assert_compile(
CreateTable(t), "CREATE TABLE t (x INTEGER DEFAULT '5')"
)
def test_string_w_quotes(self):
m = MetaData()
t = Table("t", m, Column("x", Integer, server_default="5'6"))
self.assert_compile(
CreateTable(t), "CREATE TABLE t (x INTEGER DEFAULT '5''6')"
)
def test_text(self):
m = MetaData()
t = Table("t", m, Column("x", Integer, server_default=text("5 + 8")))
self.assert_compile(
CreateTable(t), "CREATE TABLE t (x INTEGER DEFAULT 5 + 8)"
)
def test_text_w_quotes(self):
m = MetaData()
t = Table("t", m, Column("x", Integer, server_default=text("5 ' 8")))
self.assert_compile(
CreateTable(t), "CREATE TABLE t (x INTEGER DEFAULT 5 ' 8)"
)
def test_literal_binds_w_quotes(self):
m = MetaData()
t = Table(
"t", m, Column("x", Integer, server_default=literal("5 ' 8"))
)
self.assert_compile(
CreateTable(t), """CREATE TABLE t (x INTEGER DEFAULT '5 '' 8')"""
)
def test_text_literal_binds(self):
m = MetaData()
t = Table(
"t",
m,
Column(
"x", Integer, server_default=text("q + :x1").bindparams(x1=7)
),
)
self.assert_compile(
CreateTable(t), "CREATE TABLE t (x INTEGER DEFAULT q + 7)"
)
def test_sqlexpr(self):
m = MetaData()
t = Table(
"t",
m,
Column(
"x",
Integer,
server_default=literal_column("a") + literal_column("b"),
),
)
self.assert_compile(
CreateTable(t), "CREATE TABLE t (x INTEGER DEFAULT a + b)"
)
def test_literal_binds_plain(self):
m = MetaData()
t = Table(
"t",
m,
Column("x", Integer, server_default=literal("a") + literal("b")),
)
self.assert_compile(
CreateTable(t), "CREATE TABLE t (x INTEGER DEFAULT 'a' || 'b')"
)
def test_literal_binds_pgarray(self):
m = MetaData()
t = Table(
"t",
m,
Column("x", ARRAY(Integer), server_default=array([1, 2, 3])),
)
self.assert_compile(
CreateTable(t),
"CREATE TABLE t (x INTEGER[] DEFAULT ARRAY[1, 2, 3])",
dialect="postgresql",
)
|
DDLTest
|
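The same compilation can be reproduced outside the test harness; a minimal sketch of the first case above (requires SQLAlchemy; output shown modulo whitespace):

from sqlalchemy import Column, Integer, MetaData, Table
from sqlalchemy.schema import CreateTable

m = MetaData()
t = Table("t", m, Column("x", Integer, server_default="5"))
# str() compiles against the default dialect, as assert_compile does above
print(str(CreateTable(t)))  # CREATE TABLE t (x INTEGER DEFAULT '5')
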
python
|
pytorch__pytorch
|
test/test_nestedtensor.py
|
{
"start": 32268,
"end": 122449
}
|
class ____(NestedTensorTestCase):
# Helper function to generate a pair of random nested tensors
# the 2 nested tensors have same shapes
def random_nt_pair(self, device, dtype, num_tensors, max_dims):
ts1 = []
ts2 = []
for _ in range(num_tensors):
tensor_dims = tuple(
[
torch.randint(low=0, high=max_dim, size=(1,)).item()
for max_dim in max_dims
]
)
t1 = torch.randn(tensor_dims, device=device, dtype=dtype)
t2 = torch.randn(tensor_dims, device=device, dtype=dtype)
ts1.append(t1)
ts2.append(t2)
return (
torch.nested.nested_tensor(ts1, device=device, dtype=dtype),
torch.nested.nested_tensor(ts2, device=device, dtype=dtype),
)
@dtypes(*floating_types_and_half())
def test_detach(self, device, dtype):
a = torch.randn(2, 4, device=device, dtype=dtype, requires_grad=False)
b = torch.randn(5, 4, device=device, dtype=dtype, requires_grad=False)
x = torch.nested.nested_tensor([a, b], requires_grad=True)
x_detach = x.detach()
z = x_detach * 4
self.assertFalse(x_detach.requires_grad)
self.assertFalse(z.requires_grad)
a = torch.randn(2, 4, device=device, dtype=dtype, requires_grad=True)
b = torch.randn(5, 4, device=device, dtype=dtype, requires_grad=True)
x = torch.nested.as_nested_tensor([a, b])
y = x * 2
y = y.detach()
self.assertFalse(y.requires_grad)
self.assertIsNone(y.grad_fn)
z = x + y
torch.nested.to_padded_tensor(z, 0).sum().backward()
# This is an incorrect gradient, but we assume that's what the user
# wanted. detach() is an advanced option.
self.assertEqual(a.grad, torch.ones(2, 4, device=device, dtype=dtype))
self.assertEqual(b.grad, torch.ones(5, 4, device=device, dtype=dtype))
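        # Why all-ones grads: y is detached, so backprop through z = x + y reaches
        # a and b only via x = as_nested_tensor([a, b]), where d(sum(z))/dx is
        # elementwise 1; y's own dependence on a and b is deliberately dropped.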
@dtypes(torch.float, torch.double, torch.half)
@parametrize("requires_grad", [False, True])
@parametrize("weights_only", [False, True])
def test_serialization(self, device, dtype, requires_grad, weights_only):
def compare_metadata(nt1, nt2):
self.assertEqual(nt1._nested_tensor_size(), nt2._nested_tensor_size())
self.assertEqual(nt1._nested_tensor_strides(), nt2._nested_tensor_strides())
self.assertEqual(
nt1._nested_tensor_storage_offsets(),
nt2._nested_tensor_storage_offsets(),
)
nt_contiguous, nt_noncontiguous = random_nt_noncontiguous_pair((2, 3, 6, 7))
for a in [nt_contiguous, nt_noncontiguous]:
buffer = io.BytesIO()
serialized = torch.save(a, buffer)
buffer.seek(0)
b = torch.load(buffer, weights_only=weights_only)
# should be both conceptually equal and metadata equivalent
self.assertEqual(a, b)
compare_metadata(a, b)
# should be conceptually equal but not necessarily metadata equivalent
self.assertEqual(b, nt_contiguous)
self.assertEqual(b, nt_noncontiguous)
@dtypes(torch.float, torch.float16, torch.double)
def test_unbind_noncontiguous(self, device, dtype):
nt_contiguous, nt_noncontiguous = random_nt_noncontiguous_pair(
(2, 3, 6, 7), device, dtype
)
ub_contiguous = nt_contiguous.unbind()
ub_noncontiguous = nt_noncontiguous.unbind()
self.assertEqual(len(ub_contiguous), len(ub_noncontiguous))
n = len(ub_contiguous)
for i in range(n):
self.assertEqual(ub_contiguous[i], ub_noncontiguous[i])
@dtypes(torch.float)
@skipMeta
def test_to_then_from_padded_tensor_no_transform0213(self, device, dtype):
t = torch.randn(4, 4, 4, device=device, dtype=dtype)
ts = list(torch.unbind(t))
ts[0] = ts[0][:-1]
nt = torch.nested.nested_tensor(ts, device=device, dtype=dtype)
padded = torch.nested.to_padded_tensor(nt, 0)
nt_to = torch._nested_from_padded_and_nested_example(padded, nt)
for t1, t2 in zip(nt.unbind(), nt_to.unbind()):
self.assertEqual(t1, t2)
self.assertEqual(nt.device, nt_to.device)
@dtypes(torch.float)
@dtypesIfCUDA(torch.float, torch.half)
@skipMeta
@torch.inference_mode()
def test_layer_norm(self, device, dtype):
def _test(size):
# Simple shapes test
t0 = torch.randn(2, size, device=device, dtype=dtype, requires_grad=False)
t1 = torch.randn(2, size, device=device, dtype=dtype, requires_grad=False)
ts = [t0, t1, t0, t1]
nt = torch.nested.nested_tensor(ts, device=device, dtype=dtype)
layer_norm = torch.nn.LayerNorm(size, device=device, dtype=dtype)
nt_result = layer_norm(nt)
for nt_subresult, t in zip(nt_result.unbind(), ts):
t_result = layer_norm(t.reshape(1, -1, size).squeeze(0))
self.assertEqual(nt_subresult, t_result)
# More complex nt test with different lengths for each tensor
t0 = torch.randn(4, size, device=device, dtype=dtype, requires_grad=False)
t1 = torch.randn(10, size, device=device, dtype=dtype, requires_grad=False)
t2 = torch.randn(7, size, device=device, dtype=dtype, requires_grad=False)
ts = [t0, t1, t2, t0, t2]
nt = torch.nested.nested_tensor(ts, device=device, dtype=dtype)
layer_norm = torch.nn.LayerNorm(size, device=device, dtype=dtype)
nt_result = layer_norm(nt)
for nt_subresult, t in zip(nt_result.unbind(), ts):
t_result = layer_norm(t.reshape(1, -1, size).squeeze(0))
self.assertEqual(nt_subresult, t_result)
if size <= 128:
# Test with multidimensional tensors after irregular dim
# (run only with smaller dimensions to ensure fast execution)
t0 = torch.randn(
4, size, size, 4, device=device, dtype=dtype, requires_grad=False
)
t1 = torch.randn(
10, size, size, 4, device=device, dtype=dtype, requires_grad=False
)
t2 = torch.randn(
7, size, size, 4, device=device, dtype=dtype, requires_grad=False
)
ts = [t0, t1, t2, t0, t2]
nt = torch.nested.nested_tensor(ts, device=device, dtype=dtype)
layer_norm = torch.nn.LayerNorm(
(size, size, 4), device=device, dtype=dtype
)
nt_result = layer_norm(nt)
for nt_subresult, t in zip(nt_result.unbind(), ts):
t_result = layer_norm(t.reshape(1, -1, size, size, 4).squeeze(0))
self.assertEqual(nt_subresult, t_result)
                # Test where the normalized shape (size, 4) covers only the last
                # two dims, not all dims after the ragged one
layer_norm = torch.nn.LayerNorm((size, 4), device=device, dtype=dtype)
nt_result = layer_norm(nt)
for nt_subresult, t in zip(nt_result.unbind(), ts):
t_result = layer_norm(t.reshape(1, -1, size, size, 4).squeeze(0))
self.assertEqual(nt_subresult, t_result)
for size in (1024, 1023, 513, 512, 256, 128, 2, 4, 32):
_test(size)
@dtypes(torch.float)
@dtypesIfCUDA(torch.float, torch.half)
@skipMeta
@torch.inference_mode()
def test_layer_norm_breaking(self, device, dtype):
size = 128
t0 = torch.randn(
4, size, size, 4, device=device, dtype=dtype, requires_grad=False
)
t1 = torch.randn(
10, size, size, 4, device=device, dtype=dtype, requires_grad=False
)
t2 = torch.randn(
7, size, size, 4, device=device, dtype=dtype, requires_grad=False
)
ts = [t0, t1, t2, t0, t2]
nt = torch.nested.nested_tensor(ts, device=device, dtype=dtype)
layer_norm = torch.nn.LayerNorm((4, size, size, 4), device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
"normalized_shape extends into irregular dimensions for the nested tensor",
lambda: layer_norm(nt),
)
layer_norm = torch.nn.LayerNorm((size + 1, size, 4), device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
"The shape at dimension 0",
lambda: layer_norm(nt),
)
@parametrize("layout", [torch.strided, torch.jagged], name_fn=layout_name)
def test_embedding(self, device, layout):
inputs = [
torch.randint(100, (L,), device=device, dtype=torch.int64)
for L in torch.randint(5, 50, (8,))
]
x = torch.nested.nested_tensor(
inputs, device=device, dtype=torch.int64, layout=layout
)
emb = torch.nn.Embedding(100, 8, device=device)
y = emb(x)
if layout == torch.jagged:
y.backward(torch.randn_like(y))
@torch._dynamo.disable
def check(inputs, y):
ys = y.unbind()
for i, inp in enumerate(inputs):
self.assertEqual(emb(inp), ys[i])
check(inputs, y)
@dtypes(
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.uint8,
torch.float,
torch.float16,
torch.bfloat16,
torch.double,
)
def test_jagged_max_dtypes(self, device, dtype):
x = torch.nested.nested_tensor(
[torch.arange(0, n, dtype=dtype, device=device) for n in (10, 20, 30)],
layout=torch.jagged,
)
result_max = x.max(dim=1)
expected_max = torch.tensor([9, 19, 29], dtype=dtype, device=device)
self.assertEqual(result_max.values, expected_max)
@dtypes(
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.uint8,
torch.float,
torch.float16,
torch.bfloat16,
torch.double,
)
def test_jagged_min_dtypes(self, device, dtype):
x = torch.nested.nested_tensor(
[torch.arange(0, n, dtype=dtype, device=device) for n in (10, 20, 30)],
layout=torch.jagged,
)
result_min = x.min(dim=1)
expected_min = torch.tensor([0, 0, 0], dtype=dtype, device=device)
self.assertEqual(result_min.values, expected_min)
@dtypes(
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.uint8,
torch.float,
torch.float16,
torch.bfloat16,
torch.double,
)
def test_jagged_amax_dtypes(self, device, dtype):
x = torch.nested.nested_tensor(
[torch.arange(0, n, dtype=dtype, device=device) for n in (10, 20, 30)],
layout=torch.jagged,
)
result_amax = x.amax(dim=1)
expected_amax = torch.tensor([9, 19, 29], dtype=dtype, device=device)
self.assertEqual(result_amax, expected_amax)
@dtypes(
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.uint8,
torch.float,
torch.float16,
torch.bfloat16,
torch.double,
)
def test_jagged_amin_dtypes(self, device, dtype):
x = torch.nested.nested_tensor(
[torch.arange(0, n, dtype=dtype, device=device) for n in (10, 20, 30)],
layout=torch.jagged,
)
result_amin = x.amin(dim=1)
expected_amin = torch.tensor([0, 0, 0], dtype=dtype, device=device)
self.assertEqual(result_amin, expected_amin)
@dtypes(
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.uint8,
torch.float,
torch.float16,
torch.bfloat16,
torch.double,
)
def test_jagged_argmax_dtypes(self, device, dtype):
x = torch.nested.nested_tensor(
[torch.arange(0, n, dtype=dtype, device=device) for n in (10, 20, 30)],
layout=torch.jagged,
)
result_argmax = x.argmax(dim=1)
expected_argmax = torch.tensor([9, 19, 29], dtype=torch.long, device=device)
self.assertEqual(result_argmax, expected_argmax)
@dtypes(
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.uint8,
torch.float,
torch.float16,
torch.bfloat16,
torch.double,
)
def test_jagged_argmin_dtypes(self, device, dtype):
x = torch.nested.nested_tensor(
[torch.arange(0, n, dtype=dtype, device=device) for n in (10, 20, 30)],
layout=torch.jagged,
)
result_argmin = x.argmin(dim=1)
expected_argmin = torch.tensor([0, 0, 0], dtype=torch.long, device=device)
self.assertEqual(result_argmin, expected_argmin)
@skipMeta
@torch.inference_mode()
@dtypes(*floating_types_and_half())
def test_masked_fill(self, device, dtype):
# nested tensor * nested tensor
(nt, mask) = self.random_nt_pair(device, dtype, 4, (4, 4))
mask = torch.nested.nested_tensor([m < 0 for m in mask.unbind()])
ref = torch.nested.nested_tensor(
[t.masked_fill(m, 0) for (t, m) in zip(nt.unbind(), mask.unbind())]
)
out = nt.masked_fill(mask, 0)
self.assertEqual(ref, out)
@dtypes(torch.float, torch.float16)
def test_to_padded_tensor_simple(self, device, dtype):
t = torch.randn(4, 4, 4, device=device, dtype=dtype)
ts = list(torch.unbind(t))
ts[0] = ts[0][:-1]
nt = torch.nested.nested_tensor(ts, device=device, dtype=dtype)
for padding_value in (0, 1):
padded = torch.nested.to_padded_tensor(nt, padding_value)
correct_output = t.clone()
if padding_value == 0:
correct_output[0][-1] = torch.zeros_like(correct_output[0][-1])
else:
correct_output[0][-1] = torch.ones_like(correct_output[0][-1])
self.assertEqual(padded, correct_output)
self.assertEqual(padded.device, torch.device(device))
self.assertEqual(padded.dtype, dtype)
@dtypes(torch.float, torch.float16)
def test_to_padded_tensor_output_size(self, device, dtype):
t = torch.randn(4, 4, 4, device=device, dtype=dtype)
output_size = (4, 6, 5)
ts = list(torch.unbind(t))
ts[0] = ts[0][:-1]
nt = torch.nested.nested_tensor(ts, device=device, dtype=dtype)
for padding_value in (0, 1):
padded = torch.nested.to_padded_tensor(
nt, padding_value, output_size=output_size
)
correct_output = (
torch.ones(output_size, device=device, dtype=dtype) * padding_value
)
            correct_output[:4, :4, :4] = t.clone()
if padding_value == 0:
correct_output[0][3] = torch.zeros_like(correct_output[0][3])
else:
correct_output[0][3] = torch.ones_like(correct_output[0][3])
self.assertEqual(padded, correct_output)
self.assertEqual(padded.device, torch.device(device))
self.assertEqual(padded.dtype, dtype)
@dtypes(torch.float, torch.float16, torch.double)
def test_to_padded_tensor_dim2(self, device, dtype):
ts = [
torch.randn(160, device=device, dtype=dtype),
torch.randn(1240, device=device, dtype=dtype),
torch.randn(2400, device=device, dtype=dtype),
]
nt = torch.nested.nested_tensor(ts, device=device, dtype=dtype)
pad = 42
correct_output = []
for t in ts:
next_output = torch.ones_like(ts[2]) * pad
correct_output.append(next_output)
next_output[: t.size(0)].copy_(t)
correct_output = torch.stack(correct_output)
padded = torch.nested.to_padded_tensor(nt, pad)
self.assertEqual(padded, correct_output)
@dtypes(torch.float, torch.float16, torch.double)
def test_to_padded_tensor_dim3(self, device, dtype):
ts = [
torch.randn(16, 21, device=device, dtype=dtype),
torch.randn(24, 32, device=device, dtype=dtype),
torch.randn(40, 53, device=device, dtype=dtype),
]
nt = torch.nested.nested_tensor(ts, device=device, dtype=dtype)
pad = 42
correct_output = []
for t in ts:
next_output = torch.ones_like(ts[2]) * pad
correct_output.append(next_output)
next_output[: t.size(0), : t.size(1)].copy_(t)
correct_output = torch.stack(correct_output)
padded = torch.nested.to_padded_tensor(nt, pad)
self.assertEqual(padded, correct_output)
@dtypes(torch.float, torch.float16, torch.double)
def test_to_padded_tensor_dim4(self, device, dtype):
ts = [
torch.randn(16, 21, 13, device=device, dtype=dtype),
torch.randn(24, 32, 14, device=device, dtype=dtype),
torch.randn(40, 53, 16, device=device, dtype=dtype),
]
nt = torch.nested.nested_tensor(ts, device=device, dtype=dtype)
pad = 42
correct_output = []
for t in ts:
next_output = torch.ones_like(ts[2]) * pad
correct_output.append(next_output)
next_output[: t.size(0), : t.size(1), : t.size(2)].copy_(t)
correct_output = torch.stack(correct_output)
padded = torch.nested.to_padded_tensor(nt, pad)
self.assertEqual(padded, correct_output)
# TODO: test noncontiguous to_padded_tensor
# For now this tests the functionality of noncontiguous_to_padded_tensor
# and the error message of to_padded_tensor
# since to_padded_tensor does not support noncontiguous buffer yet
@dtypes(torch.float, torch.float16, torch.double)
@torch.inference_mode()
def test_to_padded_tensor_noncontiguous(self, device, dtype):
nt_contiguous, nt_noncontiguous = random_nt_noncontiguous_pair(
(2, 3, 6, 7), device, dtype
)
# test noncontiguous_to_padded_tensor functionality
self.assertEqual(
torch.nested.to_padded_tensor(nt_contiguous, 0.0),
noncontiguous_to_padded_tensor(nt_noncontiguous),
)
# test to_padded_tensor error message
self.assertRaisesRegex(
RuntimeError,
r"for now to_padded_tensor only supports contiguous nested tensor",
lambda: torch.nested.to_padded_tensor(nt_noncontiguous, 0.0),
)
@skipMeta
def test_device_checks(self, device):
nt = torch.nested.nested_tensor([], device=device)
is_cuda = "cuda" in str(device)
self.assertEqual(nt.is_cuda, is_cuda)
@skipIfTorchDynamo("Not a suitable test for TorchDynamo")
def test_share_memory(self, device):
a = torch.randn(3, 4, device=device)
b = torch.randn(5, 4, device=device)
nt = torch.nested.nested_tensor([a, b], layout=torch.jagged)
# Guard CUDA tensors
if "cuda" in device:
result = nt.share_memory_()
self.assertIs(result, nt)
return
result = nt.share_memory_()
self.assertIs(result, nt)
# Verify in shared memory
self.assertTrue(nt.is_shared())
@dtypes(torch.float, torch.float16, torch.double)
def test_nested_tensor_indexing(self, device, dtype):
# edge case: empty nested tensor
nt0 = torch.nested.nested_tensor([])
self.assertRaises(IndexError, lambda: nt0[0])
# normal case
x0 = torch.randn((2, 5), device=device, dtype=dtype)
x1 = torch.randn((3, 4), device=device, dtype=dtype)
nt = torch.nested.nested_tensor([x0, x1])
# single index: only support integer in the batch dimension
self.assertEqual(nt[0], x0)
self.assertEqual(nt[-1], x1)
self.assertRaises(IndexError, lambda: nt[2])
self.assertRaises(IndexError, lambda: nt[-3])
self.assertRaises(NotImplementedError, lambda: nt[:])
self.assertEqual(nt[...], nt)
# tuple of indices: only support integer in the batch dimension
# + all possible indexing in the original tensor dimensions
self.assertEqual(nt[0, 0, 0], x0[0, 0])
self.assertEqual(nt[0, 1, :], x0[1, :])
self.assertEqual(nt[1, ...], x1)
self.assertRaises(IndexError, lambda: nt[1, 4, 2])
self.assertRaises(NotImplementedError, lambda: nt[:, 1, 1])
# test select on non-batch dimensions
self.assertEqual(nt.select(1, 0)[0], x0.select(0, 0))
self.assertEqual(nt.select(1, 0)[1], x1.select(0, 0))
self.assertRaises(IndexError, lambda: nt.select(1, 3))
self.assertEqual(nt.select(2, 0)[0], x0.select(1, 0))
self.assertEqual(nt.select(2, 0)[1], x1.select(1, 0))
self.assertRaises(IndexError, lambda: nt.select(2, 5))
# make sure indexing returns a view
nt[0].fill_(100.0)
answer = torch.tensor(100.0, device=device, dtype=dtype).expand((2, 5))
self.assertEqual(nt[0], answer)
nt[1, 1, :].fill_(200.0)
answer = torch.tensor(200.0, device=device, dtype=dtype).expand(4)
self.assertEqual(nt[1, 1, :], answer)
# Test that indexing works when requires_grad_(True)
# previously this was failing because the backward kernel for select.int uses .sizes()
nt = torch.nested.nested_tensor([x0, x1]).requires_grad_(True)
self.assertEqual(nt[0], x0)
self.assertEqual(nt[-1], x1)
grad_x0 = torch.randn((2, 5), device=device, dtype=dtype)
nt[0].backward(grad_x0)
expected_grad = torch.nested.nested_tensor(
[grad_x0, torch.zeros((3, 4), device=device, dtype=dtype)]
)
self.assertEqual(nt.grad, expected_grad)
@parametrize(
"func",
[
subtest(torch.nn.functional.relu, name="relu"),
subtest(torch.nn.functional.relu_, name="relu_"),
subtest(torch.nn.functional.gelu, name="gelu"),
subtest(torch._C._nn.gelu_, name="gelu_"),
subtest(torch.tanh, name="tanh"),
subtest(torch.tanh_, name="tanh_"),
subtest(torch.neg, name="neg"),
subtest(torch.nn.functional.silu, name="silu"),
subtest(partial(torch.nn.functional.silu, inplace=True), name="silu_"),
subtest(torch.abs, name="abs"),
subtest(torch.abs_, name="abs_"),
subtest(torch.sgn, name="sgn"),
subtest(torch.logical_not, name="logical_not"),
subtest(torch.sin, name="sin"),
subtest(torch.cos, name="cos"),
subtest(torch.isinf, name="isinf"),
subtest(torch.isposinf, name="isposinf"),
subtest(torch.isneginf, name="isneginf"),
subtest(torch.isnan, name="isnan"),
subtest(torch.sqrt, name="sqrt"),
],
)
def test_unary_funcs(self, device, func):
nt, nt_noncontiguous = random_nt_noncontiguous_pair(
(2, 3, 6, 7), device=device, dtype=torch.float32
)
nested_result = func(nt)
self.assertTrue(nested_result.is_nested)
for t, t_res in zip(nt.unbind(), nested_result.unbind()):
self.assertEqual(func(t), t_res)
self.assertRaisesRegex(
RuntimeError,
"NestedTensor must be contiguous to get buffer.",
lambda: func(nt_noncontiguous),
)
def test_is_any_true_jagged(self, device):
B, Fin = 2, 6
start = torch.zeros(B, dtype=torch.int64, device=device)
lengths = torch.tensor([3, 2], dtype=torch.int64, device=device)
# NestedTensor reduction should operate on same data as .values().
with self.subTest("dispatch_matches_values_buffer"):
cond = torch.tensor(
[
[True, False, False, True, True, False],
[False, False, True, False, False, False],
],
dtype=torch.bool,
device=device,
)
nt = torch.nested.narrow(
cond, dim=1, start=start, length=lengths, layout=torch.jagged
)
out_nt = torch.ops.aten._is_any_true.default(nt).item()
out_vals = torch.ops.aten._is_any_true.default(nt.values()).item()
self.assertEqual(out_nt, out_vals)
# Verify jagged boolean behavior.
with self.subTest("all_false_returns_false"):
cond_false = torch.zeros(B, Fin, dtype=torch.bool, device=device)
nt_false = torch.nested.narrow(
cond_false, dim=1, start=start, length=lengths, layout=torch.jagged
)
self.assertFalse(torch.ops.aten._is_any_true.default(nt_false).item())
with self.subTest("one_true_returns_true"):
cond_mixed = torch.zeros(B, Fin, dtype=torch.bool, device=device)
cond_mixed[0, 0] = True
nt_mixed = torch.nested.narrow(
cond_mixed, dim=1, start=start, length=lengths, layout=torch.jagged
)
self.assertTrue(torch.ops.aten._is_any_true.default(nt_mixed).item())
def test_is_all_true_jagged(self, device):
B, Fin = 2, 6
start = torch.zeros(B, dtype=torch.int64, device=device)
lengths = torch.tensor([3, 2], dtype=torch.int64, device=device)
# NestedTensor reduction should operate on same data as .values().
with self.subTest("dispatch_matches_values_buffer"):
cond = torch.tensor(
[
[True, True, True, False, False, False],
[True, True, False, False, False, False],
],
dtype=torch.bool,
device=device,
)
nt = torch.nested.narrow(
cond, dim=1, start=start, length=lengths, layout=torch.jagged
)
out_nt = torch.ops.aten._is_all_true.default(nt).item()
out_vals = torch.ops.aten._is_all_true.default(nt.values()).item()
self.assertEqual(out_nt, out_vals)
# Verify jagged boolean behavior.
with self.subTest("all_true_returns_true"):
cond_true = torch.ones(B, Fin, dtype=torch.bool, device=device)
nt_true = torch.nested.narrow(
cond_true, dim=1, start=start, length=lengths, layout=torch.jagged
)
self.assertTrue(torch.ops.aten._is_all_true.default(nt_true).item())
with self.subTest("any_false_returns_false"):
cond_mixed = torch.ones(B, Fin, dtype=torch.bool, device=device)
cond_mixed[0, 1] = False
nt_mixed = torch.nested.narrow(
cond_mixed, dim=1, start=start, length=lengths, layout=torch.jagged
)
self.assertFalse(torch.ops.aten._is_all_true.default(nt_mixed).item())
@parametrize("func", [subtest(torch.ge, name="ge"), subtest(torch.eq, name="eq")])
def test_binary_ops_with_scalar(self, device, func):
nt_contiguous, nt_noncontiguous = random_nt_noncontiguous_pair(
(2, 3, 6, 7), device=device, dtype=torch.float32
)
scalar = 0.0
# should work regardless of contiguity
for nt in (nt_contiguous, nt_noncontiguous):
nested_result = func(nt, scalar)
self.assertTrue(nested_result.is_nested)
for t, t_res in zip(nt.unbind(), nested_result.unbind()):
self.assertEqual(func(t, scalar), t_res)
@dtypes(*floating_types_and_half())
def test_nested_tensor_chunk(self, device, dtype):
# Transformer use case
a = torch.randn(3, 3 * 4, device=device, dtype=dtype)
b = torch.randn(2, 3 * 4, device=device, dtype=dtype)
c = torch.randn(1, 3 * 4, device=device, dtype=dtype)
a_chunks = a.chunk(3, dim=-1)
b_chunks = b.chunk(3, dim=-1)
c_chunks = c.chunk(3, dim=-1)
a_nt = [a_chunks[0], b_chunks[0], c_chunks[0]]
b_nt = [a_chunks[1], b_chunks[1], c_chunks[1]]
c_nt = [a_chunks[2], b_chunks[2], c_chunks[2]]
nt = torch.nested.nested_tensor([a, b, c])
chunked = nt.chunk(3, dim=-1)
self.assertEqual(chunked[0], torch.nested.nested_tensor(a_nt))
self.assertEqual(chunked[1], torch.nested.nested_tensor(b_nt))
self.assertEqual(chunked[2], torch.nested.nested_tensor(c_nt))
for chunk in chunked:
self.assertFalse(chunk.is_contiguous())
# Failure chunking on ragged dimensions
self.assertRaisesRegex(
RuntimeError,
"Chunk for nested tensors is currently only supported for the last dimension.",
lambda: torch.chunk(nt, 5, dim=1),
)
self.assertRaisesRegex(
RuntimeError,
"Chunk for nested tensors is currently only supported for the last dimension.",
lambda: torch.chunk(nt, 5, dim=0),
)
# Failure on non-contiguous nt
_, nt_noncontiguous = random_nt_noncontiguous_pair((2, 3), device, dtype)
self.assertRaisesRegex(
RuntimeError,
"chunk expects `self` to be contiguous.",
lambda: torch.chunk(nt_noncontiguous, 5, dim=-1),
)
# Failure when calling non divisible n_chunks
self.assertRaisesRegex(
RuntimeError,
"Chunk for nested tensors is only supported for "
"nested tensors with trailing dimension divisible by chunks.",
lambda: torch.chunk(nt, 5, dim=-1),
)
# Failure when calling backward on a chunk
a = torch.randn(3, 3 * 4, device=device, dtype=dtype, requires_grad=True)
b = torch.randn(2, 3 * 4, device=device, dtype=dtype, requires_grad=True)
nt_grad = torch.nested.as_nested_tensor([a, b])
chunked = torch.chunk(nt_grad, 2, dim=-1)
self.assertRaisesRegex(
RuntimeError,
"Nested Strided Tensor doesn't support chunk backward.",
lambda: chunked[0].backward(chunked[0].clone()),
)
@dtypes(*floating_types_and_half())
def test_nested_tensor_split_with_sizes(self, device, dtype):
a = torch.randn(3, 20, device=device, dtype=dtype)
b = torch.randn(2, 20, device=device, dtype=dtype)
c = torch.randn(1, 20, device=device, dtype=dtype)
split_sizes = [4, 6, 10]
a_splits = a.split_with_sizes(split_sizes, dim=-1)
b_splits = b.split_with_sizes(split_sizes, dim=-1)
c_splits = c.split_with_sizes(split_sizes, dim=-1)
nt = torch.nested.nested_tensor([a, b, c])
nt_splits = nt.split_with_sizes(split_sizes, dim=-1)
for i, nt_split in enumerate(nt_splits):
self.assertEqual(
nt_split,
torch.nested.nested_tensor([a_splits[i], b_splits[i], c_splits[i]]),
)
dense_strides = torch.stack(
[
torch.tensor(a_splits[i].stride()),
torch.tensor(b_splits[i].stride()),
torch.tensor(c_splits[i].stride()),
]
)
self.assertEqual(nt_split._nested_tensor_strides(), dense_strides)
self.assertFalse(nt_split.is_contiguous())
# Failure calling on ragged dimensions
self.assertRaisesRegex(
RuntimeError,
"split_with_sizes for nested tensors is currently only supported for the last dimension.",
lambda: torch.split_with_sizes(nt, split_sizes, dim=1),
)
# Failure calling on non-last dimension
self.assertRaisesRegex(
RuntimeError,
"split_with_sizes for nested tensors is currently only supported for the last dimension.",
lambda: torch.split_with_sizes(nt, split_sizes, dim=0),
)
# Failure on non-contiguous nt
_, nt_noncontiguous = random_nt_noncontiguous_pair((2, 3), device, dtype)
self.assertRaisesRegex(
RuntimeError,
"split_with_sizes expects `self` to be contiguous.",
lambda: torch.split_with_sizes(nt_noncontiguous, split_sizes, dim=-1),
)
# Failure when calling with split_sizes that don't cover the full dim size
bad_split_sizes = [4, 6, 9] # don't add up to 20
self.assertRaisesRegex(
RuntimeError,
"split_with_sizes expects split_sizes to sum exactly to 20",
lambda: torch.split_with_sizes(nt, bad_split_sizes, dim=-1),
)
@dtypes(torch.float, torch.float16, torch.double)
@torch.inference_mode()
def test_nested_tensor_indexing_noncontiguous(self, device, dtype):
nt_contiguous, nt_noncontiguous = random_nt_noncontiguous_pair(
(2, 3, 6, 7), device, dtype
)
self.assertEqual(nt_contiguous.size(0), nt_noncontiguous.size(0))
n = nt_contiguous.size(0)
for i in range(n):
self.assertEqual(nt_contiguous[i], nt_noncontiguous[i])
@dtypes(torch.float, torch.float16)
@skipMeta
@torch.inference_mode()
@parametrize("transpose", [True, False])
def test_nested_tensor_add(self, device, dtype, transpose):
if transpose:
a = torch.randn(2, 2, 2, device=device, dtype=dtype)
b = torch.rand(2, 2, 2, device=device, dtype=dtype)
c = a.transpose(-1, -2).contiguous()
d = b.transpose(-1, -2).contiguous()
nt1 = torch.nested.nested_tensor([a, b, a, b])
nt2 = torch.nested.nested_tensor([c, d, c, d]).transpose(-1, -2)
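            # nt2 holds the same values as nested_tensor([a, b, a, b]) but through
            # a transposed, non-contiguous view, exercising the strided add path.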
else:
(nt1, nt2) = self.random_nt_pair(device, dtype, 4, (4, 4))
ref = torch.nested.nested_tensor(
[t1 + t2 for (t1, t2) in zip(nt1.unbind(), nt2.unbind())]
)
out = nt1 + nt2
self.assertEqual(ref, out)
@dtypes(torch.float, torch.float16)
@skipMeta
@torch.inference_mode()
@parametrize("transpose", [True, False])
def test_nested_tensor_sub(self, device, dtype, transpose):
if transpose:
a = torch.randn(2, 2, 2, device=device, dtype=dtype)
b = torch.rand(2, 2, 2, device=device, dtype=dtype)
c = a.transpose(-1, -2).contiguous()
d = b.transpose(-1, -2).contiguous()
nt1 = torch.nested.nested_tensor([a, b, a, b])
nt2 = torch.nested.nested_tensor([c, d, c, d]).transpose(-1, -2)
else:
(nt1, nt2) = self.random_nt_pair(device, dtype, 4, (4, 4))
ref = torch.nested.nested_tensor(
[t1 - t2 for (t1, t2) in zip(nt1.unbind(), nt2.unbind())]
)
out = nt1 - nt2
self.assertEqual(ref, out)
@onlyCUDA
@dtypes(torch.float, torch.float16)
@torch.inference_mode()
@parametrize("embedding_dim", [8, 128, 256, 384])
def test_nested_tensor_dense_elementwise(self, device, dtype, embedding_dim):
def _test_add_mul(nt, t):
ref_add = torch.nested.nested_tensor(
[t1 + t2 for (t1, t2) in zip(nt.unbind(), t.unbind())]
)
ref_mul = torch.nested.nested_tensor(
[t1 * t2 for (t1, t2) in zip(nt.unbind(), t.unbind())]
)
self.assertEqual(nt.add(t), ref_add)
self.assertEqual(nt.mul(t), ref_mul)
batch_size = 32
seq_lens = torch.randint(low=0, high=10, size=(batch_size,))
# [B, *, D], [B, 1, D] case
ts = [torch.randn((seq_len, embedding_dim)) for seq_len in seq_lens]
nt = torch.nested.nested_tensor(ts, device=device, dtype=dtype)
t = torch.randn((batch_size, 1, embedding_dim), device=device, dtype=dtype)
_test_add_mul(nt, t)
# [B, *], [B, 1] case
ts = [torch.randn(seq_len) for seq_len in seq_lens]
nt = torch.nested.nested_tensor(ts, device=device, dtype=dtype)
t = torch.randn((batch_size, 1), device=device, dtype=dtype)
_test_add_mul(nt, t)
@dtypes(torch.float, torch.float16)
@skipMeta
@torch.inference_mode()
def test_nested_tensor_mul(self, device, dtype):
# nested tensor * nested tensor
(nt1, nt2) = self.random_nt_pair(device, dtype, 4, (4, 4))
ref = torch.nested.nested_tensor(
[t1 * t2 for (t1, t2) in zip(nt1.unbind(), nt2.unbind())]
)
out = nt1 * nt2
self.assertEqual(ref, out)
# nested tensor * scalar
number = 10.0
scalar = torch.tensor(number).to(dtype).to(device)
ref = torch.nested.nested_tensor([t * number for t in nt1.unbind()])
out_number0 = nt1 * number
out_number1 = number * nt1
out_scalar0 = nt1 * scalar
out_scalar1 = scalar * nt1
self.assertEqual(out_number0, ref)
self.assertEqual(out_number1, ref)
self.assertEqual(out_scalar0, ref)
self.assertEqual(out_scalar1, ref)
# error case: numel == 1 but dim > 0
vector = torch.tensor([number]).to(dtype).to(device)
self.assertRaisesRegex(
RuntimeError,
"Expected both self and other to be nested, but got a nested self and non-nested other",
lambda: nt1.mul(vector),
)
self.assertRaisesRegex(
RuntimeError,
"Expected both self and other to be nested, but got a non-nested self and nested other",
lambda: vector.mul(nt1),
)
@dtypes(torch.float, torch.float16)
@skipMeta
@torch.inference_mode()
def test_nested_tensor_div(self, device, dtype):
nt, nt2 = self.random_nt_pair(device, dtype, 4, (4, 4))
scale = 4.0
ref = torch.nested.nested_tensor([t / scale for t in nt.unbind()])
out = nt / 4.0
self.assertEqual(ref, out)
ref_transposed = ref.transpose(1, 2)
out = nt.transpose(1, 2) / 4.0
self.assertEqual(ref_transposed, out)
ref = torch.nested.nested_tensor(
[t / t2 for (t, t2) in zip(nt.unbind(), nt2.unbind())]
)
out = nt / nt2
self.assertEqual(ref, out)
out = nt.transpose(1, 2) / nt2.transpose(1, 2)
self.assertEqual(ref.transpose(1, 2), out)
nt_transpose_copy = torch.nested.nested_tensor(
[t.transpose(0, 1) for t in nt.unbind()]
)
self.assertRaisesRegex(
RuntimeError,
"div requires strides to match when given NestedTensors",
lambda: nt_transpose_copy.transpose(1, 2) / nt2,
)
nt = torch.nested.nested_tensor(
[torch.randn(i, 4) for i in [3, 4, 5]], device=device, dtype=dtype
)
nt_chunks = nt.chunk(2, -1)
self.assertRaisesRegex(
RuntimeError,
"div requires offsets to match when given NestedTensors",
lambda: nt_chunks[0] / nt_chunks[1],
)
@dtypes(torch.float, torch.float16)
@skipMeta
@torch.inference_mode()
def test_nested_tensor_add_in_place(self, device, dtype):
(nt1, nt2) = self.random_nt_pair(device, dtype, 4, (4, 4))
ref = torch.nested.nested_tensor(
[t1 + t2 for (t1, t2) in zip(nt1.unbind(), nt2.unbind())]
)
nt1 += nt2
self.assertEqual(ref, nt1)
@dtypes(torch.float, torch.float16)
@skipMeta
@torch.inference_mode()
def test_nested_tensor_mul_in_place(self, device, dtype):
# nested tensor * nested tensor
(nt1, nt2) = self.random_nt_pair(device, dtype, 4, (4, 4))
ref = torch.nested.nested_tensor(
[t1 * t2 for (t1, t2) in zip(nt1.unbind(), nt2.unbind())]
)
nt1 *= nt2
self.assertEqual(ref, nt1)
# nested tensor * scalar
number = 10.0
scalar = torch.tensor(number).to(dtype).to(device)
ref = torch.nested.nested_tensor([t * number for t in nt1.unbind()])
out_number = nt1.clone()
out_number *= number
out_scalar = nt1.clone()
out_scalar *= scalar
self.assertEqual(out_number, ref)
self.assertEqual(out_scalar, ref)
self.assertRaisesRegex(
RuntimeError,
r"output with shape \[.*\] doesn't match the broadcast shape \[.*\]",
lambda: scalar.mul_(nt1),
)
# error case: numel == 1 but dim > 0
vector = torch.tensor([number]).to(dtype).to(device)
self.assertRaisesRegex(
RuntimeError,
"Expected both self and other to be nested, but got a nested self and non-nested other",
lambda: nt1.mul_(vector),
)
self.assertRaisesRegex(
RuntimeError,
"Expected both self and other to be nested, but got a non-nested self and nested other",
lambda: vector.mul_(nt1),
)
@onlyCPU
@skipMeta
@dtypes(torch.float)
def test_nested_tensor_sum_dim(self, device, dtype):
params = ((2, (1, 1)), ((4), (4, 4)), (10, (3, 5, 7)))
def test_sum(device, dtype, ntensors, max_sizes, dim, keepdim=True):
nt = random_nt(device, dtype, ntensors, max_sizes, require_non_empty=False)
nt2 = nt.clone()
ub2 = nt2.unbind()
nt.requires_grad_(True)
[t.requires_grad_(True) for t in ub2]
nt_sum = nt.sum(dim=dim, keepdim=keepdim)
ub2_sum = [t.sum(-1, keepdim=keepdim) for t in ub2]
self.assertEqual(nt_sum, torch.nested.nested_tensor(ub2_sum))
# test backward
# generate gradient tensor that has the same size as the output
size = nt_sum._nested_tensor_size()
gt2 = []
for i in range(ntensors):
gt2.append(torch.randn(size[i].tolist(), device=device, dtype=dtype))
gt = torch.nested.nested_tensor(gt2).clone()
nt_sum.backward(gt)
for t2, g2 in zip(ub2_sum, gt2):
t2.backward(g2)
self.assertEqual(nt.grad, torch.nested.nested_tensor([t.grad for t in ub2]))
return
for ntensors, max_sizes in params:
test_sum(device, dtype, ntensors, max_sizes, len(max_sizes))
# Test error inputs
with self.assertRaisesRegex(
RuntimeError, "NestedTensor can only be reduced across the last"
):
torch.nested.nested_tensor(
[torch.tensor([3, 4, 5]), torch.tensor([1, 2])]
).sum(0, keepdim=True)
with self.assertRaisesRegex(
RuntimeError, "NestedTensor only allows reduction of a single"
):
torch.nested.nested_tensor(
[torch.tensor([[3, 4, 5]]), torch.tensor([[1, 2]])]
).sum([0, 1], keepdim=True)
with self.assertRaisesRegex(
RuntimeError, "NestedTensor always requires keepdim=True for now."
):
torch.nested.nested_tensor(
[torch.tensor([3, 4, 5]), torch.tensor([1, 2])]
).sum(-1)
@dtypes(torch.float, torch.float16)
def test_contiguous(self, device, dtype):
        # Since we don't have access to the buffer in Python, it is harder to show
        # what we are testing for. When we call chunk on a consistent dim of an NT
        # with chunk_size > 1, the resulting tensors are views of the original NT
        # whose numel is now less than the size of the buffer. Clone was
        # previously creating a new NT with a buffer that was the same size as the
        # original.
nt_contiguous = torch.nested.nested_tensor(
[
torch.randn(2, 20, device=device, dtype=dtype),
torch.randn(4, 20, device=device, dtype=dtype),
]
)
# Split up the last dimension which has a consistent size of 20 into 5 chunks
chunks = nt_contiguous.chunk(5, dim=-1)
        # Check chunks are contiguous after calling contiguous
for chunk in chunks:
self.assertFalse(chunk.is_contiguous())
self.assertTrue(chunk.contiguous().is_contiguous())
@dtypes(torch.float, torch.float16)
@skipMeta
def test_clone(self, device, dtype):
nt1 = random_nt(device, dtype, 4, (4, 4), (1, 1))
nt2 = nt1.clone()
# Verify the values match
self.assertEqual(nt1, nt2)
# Verify modifying nt2 doesn't affect nt1
nt2.mul_(nt1)
ub1 = nt1.unbind()
ub2 = nt2.unbind()
for i in range(len(ub1)):
self.assertNotEqual(ub1[i], ub2[i])
nt1.clone(memory_format=torch.preserve_format)
msg = "Nested tensor clone supports Preserve and Contiguous memory formats, called clone with memory format: ChannelsLast"
with self.assertRaisesRegex(RuntimeError, msg):
nt1.clone(memory_format=torch.channels_last)
# cannot test torch.float16 because: RuntimeError: "bernoulli_scalar_cpu_" not implemented for 'Half'
@decorateIf(xfailIfTorchDynamo, lambda params: params["layout"] == torch.jagged)
@dtypes(torch.float, torch.double)
@parametrize("layout", [torch.strided, torch.jagged], name_fn=layout_name)
def test_dropout(self, device, dtype, layout):
# edge case: empty nested tensor
# TODO: support empty NT in jagged layout
if layout == torch.strided:
nt0 = torch.nested.nested_tensor([], layout=layout)
y = torch.nn.functional.dropout(nt0, 0.5)
self.assertEqual(nt0, y)
# normal nested tensor
ntensors = 4
if layout == torch.jagged:
nt = random_nt(device, dtype, ntensors, (4, 4), (0, 3), layout=layout)
else:
nt = random_nt(device, dtype, ntensors, (4, 4), layout=layout)
# edge case: invalid dropout
self.assertRaises(ValueError, lambda: torch.nn.Dropout(-0.1))
self.assertRaises(ValueError, lambda: torch.nn.Dropout(1.1))
self.assertRaises(ValueError, lambda: torch.nn.functional.dropout(nt, -0.1))
self.assertRaises(ValueError, lambda: torch.nn.functional.dropout(nt, 1.1))
# edge case: no dropout
dropouter = torch.nn.Dropout(0.0)
y0 = dropouter(nt)
y1 = torch.nn.functional.dropout(nt, 0.0)
self.assertEqual(nt, y0)
self.assertEqual(nt, y1)
# edge case: all dropout
dropouter = torch.nn.Dropout(1.0)
y0 = dropouter(nt)
y1 = torch.nn.functional.dropout(nt, 1.0)
nt0 = torch.zeros_like(nt)
self.assertEqual(nt0, y0)
self.assertEqual(nt0, y1)
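        # The check below relies on inverted dropout: surviving entries are scaled
        # by 1 / (1 - p) at train time so that E[y] == nt; with p = 0.2, a kept
        # value v comes back as v / 0.8 == 1.25 * v.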
# normal case: normal dropout
p = 0.2
y = torch.nn.functional.dropout(nt, p)
expect = nt.clone()
if layout == torch.jagged:
expect = torch.where(y == 0.0, y, nt)
expect /= 1.0 - p
self.assertEqual(y, expect)
else:
expect = nt.clone()
for i in range(ntensors):
actual_tensor = y[i].view(-1)
expect_tensor = expect[i].view(-1)
for j in range(actual_tensor.shape[0]):
if actual_tensor[j].item() == 0.0:
expect_tensor[j] = 0.0
else:
expect_tensor[j] /= 1.0 - p
self.assertEqual(y, expect)
with freeze_rng_state():
dropouter = torch.nn.Dropout(p)
y0 = dropouter(nt)
with freeze_rng_state():
y1 = torch.nn.functional.dropout(nt, p)
self.assertEqual(y0, y1)
@dtypes(torch.float, torch.double)
def test_dropout_noncontiguous(self, device, dtype):
ntensors = 4
nt0 = random_nt(device, dtype, ntensors, (4, 4))
nt1 = nt0.transpose(-1, -2)
p = 0.3
with freeze_rng_state():
dropouter = torch.nn.Dropout(p)
y0 = dropouter(nt0)
with freeze_rng_state():
y1 = torch.nn.functional.dropout(nt1, p).transpose(-1, -2)
self.assertEqual(y0, y1)
# cannot test torch.float16 because: RuntimeError: "softmax_kernel_impl" not implemented for 'Half'
@dtypes(torch.float, torch.double)
def test_softmax(self, device, dtype):
# normal nested tensor
ntensors = 4
nt = random_nt(device, dtype, ntensors, (4, 4))
# error case: softmax across nested dimension
self.assertRaisesRegex(
RuntimeError,
"Cannot apply softmax across nested dimension 0",
lambda: torch.nn.functional.softmax(nt, 0),
)
self.assertRaisesRegex(
RuntimeError,
"Cannot apply softmax across nested dimension 0",
lambda: torch.nn.functional.softmax(nt, -3),
)
# error case: dimension out of range
self.assertRaises(IndexError, lambda: torch.nn.functional.softmax(nt, 3))
self.assertRaises(IndexError, lambda: torch.nn.functional.softmax(nt, -4))
# normal case: should equal to padding -inf
softmaxer = torch.nn.Softmax(1)
y0 = softmaxer(nt)
y1 = torch.nn.functional.softmax(nt, 1)
self.assertEqual(y0, y1)
pt = torch.nested.to_padded_tensor(nt, float("-inf"))
# if an entire slice is padded, then softmax will return 0.0 / 0.0 = nan
# however, physically speaking that should be 0.0
expect = torch.nn.functional.softmax(pt, 1).nan_to_num_(0.0)
self.assertEqual(torch.nested.to_padded_tensor(y0, 0.0), expect)
# edge case: empty nested tensor
nt0 = torch.nested.nested_tensor([])
y = torch.nn.functional.softmax(nt0, 1)
self.assertEqual(nt0, y)
# edge case: nesting scalars
nt1 = torch.nested.nested_tensor([torch.tensor(0.0), torch.tensor(1.0)])
self.assertRaises(RuntimeError, lambda: torch.nn.functional.softmax(nt1, 0))
self.assertRaises(IndexError, lambda: torch.nn.functional.softmax(nt1, 1))
@dtypes(torch.float, torch.double)
@torch.inference_mode()
def test_softmax_noncontiguous(self, device, dtype):
nt_contiguous, nt_noncontiguous = random_nt_noncontiguous_pair(
(2, 3, 6, 7), device, dtype
)
self.assertEqual(
torch.nn.functional.softmax(nt_contiguous, -1),
torch.nn.functional.softmax(nt_noncontiguous, -1),
)
def _test_bmm(self, device, dtype):
# error case: not 3D tensors
nt0 = torch.nested.nested_tensor([], device=device, dtype=dtype)
nt1 = torch.nested.nested_tensor(
[torch.randn(2), torch.randn(3)], device=device, dtype=dtype
)
nt2 = torch.nested.nested_tensor(
[torch.randn((2, 4)), torch.randn((3, 4))], device=device, dtype=dtype
)
self.assertRaisesRegex(
RuntimeError, "batch1 must be a 3D tensor", lambda: nt0.bmm(nt0)
)
self.assertRaisesRegex(
RuntimeError, "batch1 must be a 3D tensor", lambda: nt0.bmm(nt1)
)
self.assertRaisesRegex(
RuntimeError, "batch1 must be a 3D tensor", lambda: nt0.bmm(nt2)
)
self.assertRaisesRegex(
RuntimeError, "batch1 must be a 3D tensor", lambda: nt1.bmm(nt0)
)
self.assertRaisesRegex(
RuntimeError, "batch1 must be a 3D tensor", lambda: nt1.bmm(nt1)
)
self.assertRaisesRegex(
RuntimeError, "batch1 must be a 3D tensor", lambda: nt1.bmm(nt2)
)
self.assertRaisesRegex(
RuntimeError, "batch2 must be a 3D tensor", lambda: nt2.bmm(nt0)
)
self.assertRaisesRegex(
RuntimeError, "batch2 must be a 3D tensor", lambda: nt2.bmm(nt1)
)
# error case: incompatible batch size
nt0 = torch.nested.nested_tensor(
[torch.randn((2, 4)), torch.randn((3, 4))], device=device, dtype=dtype
)
nt1 = torch.nested.nested_tensor(
[torch.randn((4, 6)), torch.randn((4, 5)), torch.randn((4, 7))],
device=device,
dtype=dtype,
)
self.assertRaisesRegex(
RuntimeError,
"Expected size for the 1st dimension of batch2 tensor to be: 2 but got: 3.",
lambda: nt0.bmm(nt1),
)
self.assertRaisesRegex(
RuntimeError,
"Expected size for the 1st dimension of batch2 tensor to be: 3 but got: 2.",
lambda: nt1.bmm(nt0),
)
# error case: underlying matrices cannot be multiplied
nt0 = torch.nested.nested_tensor(
[torch.randn((2, 4)), torch.randn((3, 4))], device=device, dtype=dtype
)
self.assertRaisesRegex(
RuntimeError,
r"0-th nested matrices in batch cannot be multiplied \(2x4 and 2x4\)",
lambda: nt0.bmm(nt0),
)
# normal nested tensor
nt0 = torch.nested.nested_tensor(
[torch.randn((2, 4)), torch.randn((3, 7))], device=device, dtype=dtype
)
nt1 = torch.nested.nested_tensor(
[torch.randn((4, 6)), torch.randn((7, 5))], device=device, dtype=dtype
)
actual = torch.nested.to_padded_tensor(nt0.bmm(nt1), 0.0)
expect = torch.nested.to_padded_tensor(nt0, 0.0).bmm(
torch.nested.to_padded_tensor(nt1, 0.0)
)
if dtype == torch.float16:
self.assertEqual(actual, expect, rtol=1e-3, atol=1e-3)
else:
self.assertEqual(actual, expect)
# nested tensor bmm normal tensor
nt0 = torch.nested.nested_tensor(
[torch.randn((2, 7)), torch.randn((3, 7))], device=device, dtype=dtype
)
nt1 = torch.rand(2, 7, 5, dtype=dtype, device=device)
actual = torch.nested.to_padded_tensor(nt0.bmm(nt1), 0.0)
expect = torch.nested.to_padded_tensor(nt0, 0.0).bmm(nt1)
if dtype == torch.float16:
self.assertEqual(actual, expect, rtol=1e-3, atol=1e-3)
else:
self.assertEqual(actual, expect)
# nested tensor bmm normal tensor with non-contiguous view
nt1 = torch.rand(2, 5, 7, dtype=dtype, device=device)
nt1 = nt1.transpose(1, 2)
actual = torch.nested.to_padded_tensor(nt0.bmm(nt1), 0.0)
expect = torch.nested.to_padded_tensor(nt0, 0.0).bmm(nt1)
if dtype == torch.float16:
self.assertEqual(actual, expect, rtol=1e-3, atol=1e-3)
else:
self.assertEqual(actual, expect)
# normal tensor bmm nested tensor
nt0 = torch.rand(2, 5, 7, dtype=dtype, device=device)
nt1 = torch.nested.nested_tensor(
[torch.randn((7, 6)), torch.randn((7, 5))], device=device, dtype=dtype
)
actual = torch.nested.to_padded_tensor(nt0.bmm(nt1), 0.0)
expect = nt0.bmm(torch.nested.to_padded_tensor(nt1, 0.0))
if dtype == torch.float16:
self.assertEqual(actual, expect, rtol=1e-3, atol=1e-3)
else:
self.assertEqual(actual, expect)
# test tensorcore path
nt0 = torch.nested.nested_tensor(
[torch.randn((2, 8)), torch.randn((3, 16))], device=device, dtype=dtype
)
nt1 = torch.nested.nested_tensor(
[torch.randn((8, 8)), torch.randn((16, 8))], device=device, dtype=dtype
)
actual = torch.nested.to_padded_tensor(nt0.bmm(nt1), 0.0)
expect = torch.nested.to_padded_tensor(nt0, 0.0).bmm(
torch.nested.to_padded_tensor(nt1, 0.0)
)
if dtype == torch.float16:
self.assertEqual(actual, expect, rtol=1e-3, atol=1e-3)
else:
self.assertEqual(actual, expect)
@onlyCUDA
@dtypes(torch.float, torch.double, torch.float16, torch.bfloat16)
@tf32_on_and_off(0.005)
def test_bmm_cuda(self, device, dtype):
self._test_bmm(device, dtype)
@onlyCPU
# cannot test torch.float16 because: RuntimeError: "addmm_impl_cpu_" not implemented for 'Half'
@dtypes(torch.float, torch.double)
def test_bmm_cpu(self, device, dtype):
self._test_bmm(device, dtype)
# cannot test torch.float16 because: RuntimeError: "addmm_impl_cpu_" not implemented for 'Half'
@dtypes(torch.float, torch.double)
def test_bmm_noncontiguous(self, device, dtype):
nt0_contiguous, nt0_noncontiguous = random_nt_noncontiguous_pair(
(2, 3), device, dtype
)
nt1_contiguous, nt1_noncontiguous = random_nt_noncontiguous_pair(
(6, 7), device, dtype
)
self.assertEqual(
nt0_contiguous.transpose(-1, -2).bmm(nt1_contiguous),
nt0_noncontiguous.transpose(-1, -2).bmm(nt1_noncontiguous),
)
@dtypes(torch.float, torch.double)
@tf32_on_and_off(0.005)
def test_matmul_with_bmm_path(self, device, dtype):
def unbind_rebind_matmul(nt1, nt2):
t1s = nt1.unbind()
t2s = nt2.unbind()
out_ts = [t1.matmul(t2) for t1, t2 in zip(t1s, t2s)]
return torch.nested.nested_tensor(out_ts)
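        # unbind_rebind_matmul is the per-sample reference: matmul each pair of
        # constituents eagerly, re-wrap as an NT, and compare against the fused
        # nested matmul path.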
# [N, n_head, *, head_dim], [N, n_head, head_dim, *]
Ns = [1, 2, 5]
n_heads = np.random.randint(2, 5)
head_dim = 3
t1s = []
t2s = []
for N in Ns:
for _ in range(N):
seq_len1 = np.random.randint(2, 5)
seq_len2 = np.random.randint(2, 5)
t1s.append(torch.randn(n_heads, seq_len1, head_dim))
t2s.append(torch.randn(n_heads, head_dim, seq_len2))
nt1 = torch.nested.nested_tensor(t1s, device=device, dtype=dtype)
nt2 = torch.nested.nested_tensor(t2s, device=device, dtype=dtype)
self.assertEqual(torch.matmul(nt1, nt2), unbind_rebind_matmul(nt1, nt2))
# test with noncontiguous
t3s = []
t4s = []
for _ in range(N):
seq_len = np.random.randint(2, 5)
t3s.append(torch.randn(seq_len, n_heads, head_dim))
t4s.append(torch.randn(seq_len, n_heads, head_dim))
nt3 = torch.nested.nested_tensor(t3s, device=device, dtype=dtype).transpose(
1, 2
)
nt4 = (
torch.nested.nested_tensor(t4s, device=device, dtype=dtype)
.transpose(1, 2)
.transpose(2, 3)
)
self.assertEqual(torch.matmul(nt3, nt4), unbind_rebind_matmul(nt3, nt4))
# cannot test torch.float16 because: RuntimeError: "bmm" not implemented for 'Half'
@dtypes(torch.float, torch.double)
def test_matmul(self, device, dtype):
# error case: one is nested but the other is not
nt = torch.nested.nested_tensor(
[torch.randn(2), torch.randn(3)], device=device, dtype=dtype
)
t = torch.randn(4, device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
"Expected both to be nested, but got a nested self and non-nested other",
lambda: torch.matmul(nt, t),
)
self.assertRaisesRegex(
RuntimeError,
"Expected both to be nested, but got a non-nested self and nested other",
lambda: torch.matmul(t, nt),
)
# error case: not 3+D tensors
nt0 = torch.nested.nested_tensor([], device=device, dtype=dtype)
nt1 = torch.nested.nested_tensor(
[torch.randn(2), torch.randn(3)], device=device, dtype=dtype
)
nt2 = torch.nested.nested_tensor(
[torch.randn((2, 4)), torch.randn((3, 4))], device=device, dtype=dtype
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: For nested tensors, only inputs with >= 3 dims are currently supported. 1st input has rank: [0-9]+",
lambda: torch.matmul(nt0, nt0),
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: For nested tensors, only inputs with >= 3 dims are currently supported. 1st input has rank: [0-9]+",
lambda: torch.matmul(nt0, nt1),
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: For nested tensors, only inputs with >= 3 dims are currently supported. 1st input has rank: [0-9]+",
lambda: torch.matmul(nt0, nt2),
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: For nested tensors, only inputs with >= 3 dims are currently supported. 1st input has rank: [0-9]+",
lambda: torch.matmul(nt1, nt0),
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: For nested tensors, only inputs with >= 3 dims are currently supported. 1st input has rank: [0-9]+",
lambda: torch.matmul(nt1, nt1),
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: For nested tensors, only inputs with >= 3 dims are currently supported. 1st input has rank: [0-9]+",
lambda: torch.matmul(nt1, nt2),
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: For nested tensors, only inputs with >= 3 dims are currently supported. 2nd input has rank: [0-9]+",
lambda: torch.matmul(nt2, nt0),
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: For nested tensors, only inputs with >= 3 dims are currently supported. 2nd input has rank: [0-9]+",
lambda: torch.matmul(nt2, nt1),
)
# error case: incompatible batch size
nt0 = torch.nested.nested_tensor(
[torch.randn((2, 4)), torch.randn((3, 4))], device=device, dtype=dtype
)
nt1 = torch.nested.nested_tensor(
[torch.randn((4, 6)), torch.randn((4, 5)), torch.randn((4, 7))],
device=device,
dtype=dtype,
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: Expected size for the 1st dimension of 2nd input tensor to be: [0-9]+ but got: [0-9]+.",
lambda: torch.matmul(nt0, nt1),
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: Expected size for the 1st dimension of 2nd input tensor to be: [0-9]+ but got: [0-9]+.",
lambda: torch.matmul(nt1, nt0),
)
# error case: incompatible (wrong) batch sizes that shouldn't even broadcast
nt0 = torch.nested.nested_tensor(
[torch.randn((2, 2, 4)), torch.randn((2, 3, 4))], device=device, dtype=dtype
)
nt1 = torch.nested.nested_tensor(
[torch.randn((3, 4, 6)), torch.randn((3, 4, 5))], device=device, dtype=dtype
)
self.assertRaisesRegex(
RuntimeError,
"matmul(): For nested tensors, batch dimensions must have the same sizes,",
lambda: torch.matmul(nt0, nt1),
)
# error case: incompatible batch sizes that should technically broadcast
nt0 = torch.nested.nested_tensor(
[torch.randn((2, 2, 4)), torch.randn((1, 3, 4))], device=device, dtype=dtype
)
nt1 = torch.nested.nested_tensor(
[torch.randn((1, 4, 6)), torch.randn((3, 4, 5))], device=device, dtype=dtype
)
self.assertRaisesRegex(
RuntimeError,
"matmul(): For nested tensors, batch dimensions must have the same sizes,",
lambda: torch.matmul(nt0, nt1),
)
# error case: underlying matrices cannot be multiplied
nt0 = torch.nested.nested_tensor(
[torch.randn((2, 4)), torch.randn((3, 4))], device=device, dtype=dtype
)
self.assertRaisesRegex(
RuntimeError,
"matmul(): Nested tensors cannot be matrix multiplied",
lambda: torch.matmul(nt0, nt0),
)
# normal nested tensor: 3D
nt0 = torch.nested.nested_tensor(
[torch.randn((2, 4)), torch.randn((3, 7))], device=device, dtype=dtype
)
nt1 = torch.nested.nested_tensor(
[torch.randn((4, 6)), torch.randn((7, 5))], device=device, dtype=dtype
)
actual = torch.nested.to_padded_tensor(torch.matmul(nt0, nt1), 0.0)
expect = torch.matmul(
torch.nested.to_padded_tensor(nt0, 0.0),
torch.nested.to_padded_tensor(nt1, 0.0),
)
self.assertEqual(actual, expect)
# normal nested tensor: 4D (with testing for batch_size=1)
nt0 = torch.nested.nested_tensor(
[torch.randn((1, 2, 4)), torch.randn((8, 3, 7))], device=device, dtype=dtype
)
nt1 = torch.nested.nested_tensor(
[torch.randn((1, 4, 6)), torch.randn((8, 7, 5))], device=device, dtype=dtype
)
actual = torch.nested.to_padded_tensor(torch.matmul(nt0, nt1), 0.0)
expect = torch.matmul(
torch.nested.to_padded_tensor(nt0, 0.0),
torch.nested.to_padded_tensor(nt1, 0.0),
)
self.assertEqual(actual, expect)
# normal nested tensor: 5D
nt0 = torch.nested.nested_tensor(
[torch.randn((8, 9, 2, 4)), torch.randn((8, 9, 3, 7))],
device=device,
dtype=dtype,
)
nt1 = torch.nested.nested_tensor(
[torch.randn((8, 9, 4, 6)), torch.randn((8, 9, 7, 5))],
device=device,
dtype=dtype,
)
actual = torch.nested.to_padded_tensor(torch.matmul(nt0, nt1), 0.0)
expect = torch.matmul(
torch.nested.to_padded_tensor(nt0, 0.0),
torch.nested.to_padded_tensor(nt1, 0.0),
)
self.assertEqual(actual, expect)
# only supported on CUDA for now
@dtypes(torch.float, torch.double)
def test_matmul_nt_with_broadcasted_t(self, device, dtype):
# NT (B, *, C, D) with T (D, E) broadcasting case
nt = random_nt_from_dims([3, None, 4, 5], device=device, dtype=dtype)
t = torch.randn(5, 6, device=device, dtype=dtype)
output = torch.matmul(nt, t)
# should be equivalent to matmul-ing each component with the dense tensor
self.assertEqual(nt.size(0), output.size(0))
for component, out_component in zip(nt, output):
self.assertEqual(out_component, torch.matmul(component, t))
# cannot test torch.float16 because: RuntimeError: "bmm" not implemented for 'Half'
@dtypes(torch.float, torch.double)
def test_matmul_noncontiguous(self, device, dtype):
nt0_contiguous, nt0_noncontiguous = random_nt_noncontiguous_pair(
(2, 3), device, dtype
)
nt1_contiguous, nt1_noncontiguous = random_nt_noncontiguous_pair(
(6, 7), device, dtype
)
self.assertEqual(
torch.matmul(nt0_contiguous.transpose(-1, -2), nt1_contiguous),
torch.matmul(nt0_noncontiguous.transpose(-1, -2), nt1_noncontiguous),
)
@dtypes(torch.float, torch.double)
def test_linear(self, device, dtype):
a = torch.randn(1, 2, device=device, dtype=dtype)
b = torch.randn(2, 2, device=device, dtype=dtype)
c = torch.randn(3, 2, device=device, dtype=dtype)
nt = torch.nested.nested_tensor([a, b, c])
weight = torch.randn(2, 2, device=device, dtype=dtype)
bias = torch.randn(2, device=device, dtype=dtype)
# success case
torch.functional.F.linear(nt, weight, bias)
# invalid nested tensor dimension
msg = r"Linear requires nested_tensor.dim == 3 and dense_matrix.dim == 2. Nested tensor dim: 2. Dense tensor dim: 2"
nt1 = torch.nested.nested_tensor(
[
torch.randn(1, device=device, dtype=dtype),
torch.randn(2, device=device, dtype=dtype),
]
)
with self.assertRaisesRegex(RuntimeError, msg):
torch.functional.F.linear(nt1, weight, bias)
# invalid weight shape
msg = r"Linear requires nested_tensor.dim == 3 and dense_matrix.dim == 2. Nested tensor dim: 3. Dense tensor dim: 3"
weight1 = torch.randn(2, 2, 3, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, msg):
torch.functional.F.linear(nt, weight1, bias)
# inconsistent last dim of nested tensor
msg = r"Expected all tensors in nested tensor to have the same trailing dimension, instead last dimension equals:"
nt2 = torch.nested.nested_tensor(
[
torch.randn(1, 2, device=device, dtype=dtype),
torch.randn(2, 3, device=device, dtype=dtype),
]
)
with self.assertRaisesRegex(RuntimeError, msg):
torch.functional.F.linear(nt2, weight, bias)
# Mismatch of nested tensor last dim and weight dimension
weight2 = torch.randn(2, 4, device=device, dtype=dtype)
msg = (
r"Shape mismatch for NestedTensor Linear: Expected input's \(a nested tensor\) 'last_dim'"
r" to equal 'weight.size\(1\), but got: last_dim = 2, and weight.size\(1\) = 4"
)
with self.assertRaisesRegex(RuntimeError, msg):
torch.functional.F.linear(nt, weight2, bias)
# Nested tensor input and nested weight
nt_weight = nt.clone()
msg = r"Linear does not support nested weight when input is a nested tensor."
with self.assertRaisesRegex(RuntimeError, msg):
torch.functional.F.linear(nt, nt_weight, bias)
# TODO: test noncontiguous linear
# For now this tests the error message of linear
# since linear does not support noncontiguous buffer yet
@dtypes(torch.float, torch.double)
def test_linear_noncontiguous(self, device, dtype):
nt_contiguous, nt_noncontiguous = random_nt_noncontiguous_pair(
(2, 3, 6, 7), device, dtype
)
weight = torch.randn((8, 5), device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
r"for now linear only supports contiguous nested tensor",
lambda: torch.nn.functional.linear(nt_noncontiguous, weight),
)
@dtypes(torch.float, torch.float16, torch.double)
def test_to_padded_tensor_zero_numel_errors(self, device, dtype):
ts = [torch.ones(1, 0), torch.ones(0, 0)]
nt = torch.nested.nested_tensor(
ts, device=device, dtype=dtype, layout=torch.strided
)
self.assertRaisesRegex(
RuntimeError,
r"at least one constituent tensor should have non-zero numel",
lambda: torch.nested.to_padded_tensor(nt, 0.0),
)
@dtypes(torch.float, torch.float16, torch.double)
def test_transpose(self, device, dtype):
nt = random_nt(device, dtype, 4, (4, 4))
# error case: transpose nested dimension
self.assertRaisesRegex(
RuntimeError,
"Nested tensor dimension 0 cannot be transposed",
lambda: nt.transpose(0, 1),
)
self.assertRaisesRegex(
RuntimeError,
"Nested tensor dimension 0 cannot be transposed",
lambda: nt.transpose(1, -3),
)
# error case: dimension out of range
self.assertRaises(IndexError, lambda: nt.transpose(1, 3))
self.assertRaises(IndexError, lambda: nt.transpose(-4, -1))
# normal case
ntT = nt.transpose(-1, -2)
ptT_from_ntT = noncontiguous_to_padded_tensor(ntT)
pt = torch.nested.to_padded_tensor(nt, 0.0)
ptT = pt.transpose(-1, -2)
self.assertEqual(ptT, ptT_from_ntT)
@dtypes(torch.float, torch.float16, torch.double)
def test_squeeze_unsqueeze(self, device, dtype):
a = torch.arange(6).reshape(2, 3)
b = torch.arange(15).reshape(5, 3)
nt = torch.nested.nested_tensor([a, b], device=device, dtype=dtype)
# error case: squeeze no dimension
self.assertRaisesRegex(
RuntimeError,
"For nested tensors, squeeze without the dim argument",
lambda: nt.squeeze(),
)
# error case: squeeze nested dimension
self.assertRaisesRegex(
RuntimeError,
"For nested tensors, squeezing dimension 0",
lambda: nt.squeeze(0),
)
# error case: dimension out of range
self.assertRaises(IndexError, lambda: nt.squeeze(3))
# error case: squeeze nested tensor of singleton tensors
c = torch.ones(1)
nt_singleton = torch.nested.nested_tensor([c, c], device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
"For nested tensors, squeezing a nested tensor of singleton",
lambda: nt_singleton.squeeze(1),
)
# squeezing a dim which does not have size 1 should be a no-op
nt2 = nt.squeeze(-1)
self.assertEqual(nt, nt2)
# test cases that should work
nt_sizes = nt._nested_tensor_size()
nt_strides = nt._nested_tensor_strides()
for i in range(-2, 4):
if i == 0:
# cannot unsqueeze batch dim
continue
nt_unsqueezed = nt.unsqueeze(i)
# negative dim will correspond to unsqueeze() applied at dim = dim + nt.dim() + 1
wrapped_i = i + nt.dim() + 1 if i < 0 else i
# the column index into the nt size tensor requires subtracting 1 to ignore the batch dim
size_idx = wrapped_i - 1
self.assertEqual(
nt_unsqueezed._nested_tensor_size()[:, size_idx],
torch.ones(2, dtype=torch.long),
)
unsqueezed_stride = nt_unsqueezed._nested_tensor_strides()[:, size_idx]
if i == nt.ndim or i == -1:
self.assertEqual(unsqueezed_stride, torch.ones(2, dtype=torch.long))
else:
stride_col_after = nt_strides[:, size_idx]
size_col_after = nt_sizes[:, size_idx]
self.assertEqual(unsqueezed_stride, stride_col_after * size_col_after)
nt_squeezed = nt_unsqueezed.squeeze(i)
self.assertEqual(nt_squeezed, nt)
self.assertEqual(nt_squeezed._nested_tensor_size(), nt_sizes)
self.assertEqual(nt_squeezed._nested_tensor_strides(), nt_strides)
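# (Illustrative note on the wrapping arithmetic above, not part of the original
# test: for this 3-dim nt, i = -2 maps to -2 + nt.dim() + 1 = 2, so
# unsqueeze(-2) and unsqueeze(2) produce the same 4-dim result.)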
@dtypes(torch.float, torch.float16, torch.double)
def test_transpose_inference_mode_interaction(self, device, dtype):
nt = random_nt(device, dtype, 4, (4, 4))
# Construct in default mode and transpose while in inference mode
with torch.inference_mode():
ntT = nt.transpose(-1, -2)
ptT_from_ntT = noncontiguous_to_padded_tensor(ntT)
pt = torch.nested.to_padded_tensor(nt, 0.0)
ptT = pt.transpose(-1, -2)
self.assertEqual(ptT, ptT_from_ntT)
# Construct and transpose while in inference mode
with torch.inference_mode():
nt = random_nt(device, dtype, 4, (4, 4))
ntT = nt.transpose(-1, -2)
ptT_from_ntT = noncontiguous_to_padded_tensor(ntT)
pt = torch.nested.to_padded_tensor(nt, 0.0)
ptT = pt.transpose(-1, -2)
self.assertEqual(ptT, ptT_from_ntT)
@dtypes(torch.float, torch.float16, torch.double)
def test_view(self, device, dtype):
nt = random_nt(device, dtype, 4, (4, 4))
# error case: empty shape
self.assertRaisesRegex(
RuntimeError,
r"shape '\[\]' is invalid for a nested tensor",
lambda: nt.view(()),
)
# error case: empty nested tensor
nt_empty = torch.nested.nested_tensor([])
self.assertRaisesRegex(
RuntimeError,
"empty nested tensor cannot be reshaped",
lambda: nt_empty.view(-1),
)
# error case: -1 for batch size
self.assertRaisesRegex(
RuntimeError,
r"view: For now nested view cannot change or infer the implicit batch dimension",
lambda: nt.view(-1, 2, 3),
)
self.assertRaisesRegex(
RuntimeError,
r"shape '\[.*\]' is invalid for input of size [0-9]+",
lambda: nt.view(4, 2, 3),
)
# normal case
x0 = torch.randn((2, 20), device=device, dtype=dtype)
x1 = torch.randn((3, 20), device=device, dtype=dtype)
nt = torch.nested.nested_tensor([x0, x1])
pt = torch.nested.to_padded_tensor(nt, 0.0)
# error case, trying to reshape batch dim to a legit shape
self.assertRaisesRegex(
RuntimeError,
r"For now nested view cannot change or infer the implicit batch dimension",
lambda: nt.transpose(-1, -2).view(40, -1),
)
# inherit only the ragged dimension
# (2, 20) -> (2, 5, 4)
# (3, 20) -> (3, 5, 4)
nt1 = nt.view(2, -1, 5, 4)
# padded: (2, 3, 20) -> (2, 3, 5, 4)
pt1 = pt.view(2, -1, 5, 4)
self.assertEqual(noncontiguous_to_padded_tensor(nt1), pt1)
# more than one -1 (even for "old" dims), should fail
# this attempts to do (2, (2, 3), 5, 4) -> (2, (2, 3), 5, 2, 2)
# but we ban "inherit old behavior" for >1 dimension
self.assertRaisesRegex(
RuntimeError,
r"only one dimension can be inferred",
lambda: nt1.view(2, -1, -1, 2, 2),
)
@dtypes(torch.float, torch.float16, torch.double)
def test_view_inference_mode_interaction(self, device, dtype):
# Construct in default mode and view while in inference mode
nt = torch.nested.nested_tensor(
[torch.randn((2, 20)), torch.randn((3, 20))], device=device, dtype=dtype
)
with torch.inference_mode():
ntT = nt.view(2, -1, 4, 5)
ptT_from_ntT = noncontiguous_to_padded_tensor(ntT)
pt = torch.nested.to_padded_tensor(nt, 0.0)
ptT = pt.view(2, -1, 4, 5)
self.assertEqual(ptT, ptT_from_ntT)
# Construct and view while in inference mode
with torch.inference_mode():
nt = torch.nested.nested_tensor(
[torch.randn((2, 20)), torch.randn((3, 20))], device=device, dtype=dtype
)
ntT = nt.view(2, -1, 4, 5)
ptT_from_ntT = noncontiguous_to_padded_tensor(ntT)
pt = torch.nested.to_padded_tensor(nt, 0.0)
ptT = pt.view(2, -1, 4, 5)
self.assertEqual(ptT, ptT_from_ntT)
@dtypes(torch.float, torch.float16, torch.double)
def test_reshape(self, device, dtype):
nt = random_nt(device, dtype, 4, (4, 4))
# error case: empty shape
self.assertRaisesRegex(
RuntimeError,
r"shape '\[\]' is invalid for a nested tensor",
lambda: nt.reshape(()),
)
# error case: empty nested tensor
nt_empty = torch.nested.nested_tensor([])
self.assertRaisesRegex(
RuntimeError,
"empty nested tensor cannot be reshaped",
lambda: nt_empty.reshape(-1),
)
# error case: -1 for batch size
self.assertRaisesRegex(
RuntimeError,
r"reshape: For now nested reshape cannot change or infer the implicit batch dimension",
lambda: nt.reshape(-1, 2, 3),
)
self.assertRaisesRegex(
RuntimeError,
r"shape '\[.*\]' is invalid for input of size [0-9]+",
lambda: nt.reshape(4, 2, 3),
)
# normal case
x0 = torch.randn((2, 20), device=device, dtype=dtype)
x1 = torch.randn((3, 20), device=device, dtype=dtype)
nt = torch.nested.nested_tensor([x0, x1]) # (2, (2, 3), 20)
pt = torch.nested.to_padded_tensor(nt, 0.0)
# error case, trying to reshape batch dim to a legit shape
self.assertRaisesRegex(
RuntimeError,
r"reshape: For now nested reshape cannot change or infer the implicit batch dimension",
lambda: nt.transpose(-1, -2).reshape(40, -1),
)
# inherit only the ragged dimension
# (2, 20) -> (2, 5, 4)
# (3, 20) -> (3, 5, 4)
nt1 = nt.reshape(2, -1, 5, 4)
# padded: (2, 3, 20) -> (2, 3, 5, 4)
pt1 = pt.reshape(2, -1, 5, 4)
self.assertEqual(noncontiguous_to_padded_tensor(nt1), pt1)
# more than one -1 (even for "old" dims), should fail
# this attempts to do (2, (2, 3), 5, 4) -> (2, (2, 3), 5, 2, 2)
# but we ban "inherit old behavior" for >1 dimension
self.assertRaisesRegex(
RuntimeError,
r"only one dimension can be inferred",
lambda: nt1.reshape(2, -1, -1, 2, 2),
)
def test_nested_masked_select(self, device):
t = torch.randn([3, 3], device=device)
mask = torch.tensor([False], device=device)
njt = torch.nested.masked_select(t, mask)
self.assertEqual(njt.values(), torch.tensor([], device=device))
self.assertEqual(njt.offsets(), torch.tensor([0, 0, 0, 0], device=device))
mask = torch.tensor([[False], [False], [True]], device=device)
njt = torch.nested.masked_select(t, mask)
self.assertEqual(njt.values(), t[-1], atol=0.1, rtol=0.1)
self.assertEqual(njt.offsets(), torch.tensor([0, 0, 0, 3], device=device))
mask = torch.tensor(
[[False, False, True], [True, False, True], [False, False, True]],
device=device,
)
njt = torch.nested.masked_select(t, mask)
self.assertEqual(njt.values(), t.masked_select(mask))
self.assertEqual(njt.offsets(), torch.tensor([0, 1, 3, 4], device=device))
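# (Illustrative note, not part of the original test: offsets() stores the
# cumulative count of selected entries per mask row, so rows selecting
# 1, 2, and 1 elements yield offsets [0, 1, 3, 4].)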
t = torch.randn([2, 3, 3, 1], device=device)
mask = torch.tensor(
[
[
[[True], [False], [True]],
[[True], [False], [True]],
[[True], [False], [True]],
],
[
[[False], [True], [True]],
[[False], [True], [True]],
[[True], [True], [True]],
],
],
device=device,
)
njt = torch.nested.masked_select(t, mask)
self.assertEqual(njt.values(), t.masked_select(mask))
self.assertEqual(
njt.offsets(),
torch.tensor(
[0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 11, 12, 13],
device=device,
),
)
@dtypes(torch.float, torch.float16, torch.double)
def test_narrow(self, device, dtype):
nt = random_nt_from_dims([5, None, None, None], device=device, dtype=dtype)
# narrow on dim=0 from start to end
bounds = [(0, 5), (0, 3), (1, 2), (1, 5), (2, 4)]
for start, end in bounds:
length = end - start
narrowed = nt.narrow(dim=0, start=start, length=length)
# ensure output is a view
self.assertTrue(narrowed._base is nt)
for nc, c in zip(narrowed.unbind(), nt.unbind()[start:end]):
self.assertEqual(nc, c)
# dim != 0 is not supported
for dim in range(1, nt.dim()):
with self.assertRaisesRegex(
RuntimeError, "only dim=0 supported for nested tensors"
):
nt.narrow(dim=dim, start=0, length=1)
# error case: non-contiguous NT
_, nt_noncont = random_nt_noncontiguous_pair((2, 3, 4))
with self.assertRaisesRegex(
RuntimeError, "only contiguous nested tensors supported"
):
nt_noncont.narrow(dim=0, start=0, length=1)
@parametrize("input_dim", [3, 4])
@tf32_on_and_off(0.005)
def test_scaled_dot_product_attention(self, device, input_dim):
def rand_tensor(*shape):
return torch.randn(shape, device=device)
E = 8
if input_dim == 3:
# Shape: (N, L, E); ragged L
query = torch.nested.nested_tensor(
[rand_tensor(2, E), rand_tensor(3, E), rand_tensor(4, E)]
)
# Shape: (N, S, E); ragged S
key = torch.nested.nested_tensor(
[rand_tensor(3, E), rand_tensor(4, E), rand_tensor(5, E)]
)
value = torch.nested.nested_tensor(
[rand_tensor(3, E), rand_tensor(4, E), rand_tensor(5, E)]
)
elif input_dim == 4:
# In the 4D case, both L and S are ragged
# Shape: (N, N', L, E); ragged N' and L
query = torch.nested.nested_tensor(
[rand_tensor(2, 2, E), rand_tensor(3, 3, E), rand_tensor(4, 4, E)]
)
# Shape: (N, N', S, E); ragged N' and S
key = torch.nested.nested_tensor(
[rand_tensor(2, 3, E), rand_tensor(3, 4, E), rand_tensor(4, 5, E)]
)
value = torch.nested.nested_tensor(
[rand_tensor(2, 3, E), rand_tensor(3, 4, E), rand_tensor(4, 5, E)]
)
else:
self.fail(f"Invalid input_dim {input_dim} encountered in SDP test")
def rand_mask(size):
return torch.randint(0, 2, size=size, dtype=torch.bool, device=device)
# Shape: (N, L, S); ragged L and S matching above
attn_mask = torch.nested.nested_tensor(
[rand_mask((2, 3)), rand_mask((3, 4)), rand_mask((4, 5))]
)
dropout_p = 0.0 # no dropout for reproducibility
# Success case: no attn_mask set and is_causal=False.
actual = torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask=None, is_causal=False, dropout_p=dropout_p
)
expected_outputs = []
for q, k, v in zip(query.unbind(), key.unbind(), value.unbind()):
output = torch.nn.functional.scaled_dot_product_attention(
q.unsqueeze(0),
k.unsqueeze(0),
v.unsqueeze(0),
attn_mask=None,
dropout_p=dropout_p,
)
expected_outputs.append(output.squeeze(0))
expected_output_nested = torch.nested.nested_tensor(expected_outputs)
self.assertEqual(actual, expected_output_nested)
# Error case: explicit attn_mask set.
with self.assertRaisesRegex(
RuntimeError, "not supported when an explicit attn_mask is set"
):
torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask=attn_mask, dropout_p=dropout_p
)
# Error case: is_causal=True.
with self.assertRaisesRegex(RuntimeError, "not supported when is_causal=True"):
torch.nn.functional.scaled_dot_product_attention(
query, key, value, dropout_p=dropout_p, is_causal=True
)
@dtypes(torch.float, torch.float16, torch.double)
def test_empty_like(self, device, dtype):
ntensors = 4
nt = random_nt(device, dtype, ntensors, (4, 4))
# Create empty on same device as original nested tensor
nt_empty = torch.empty_like(nt)
assert nt.is_same_size(nt_empty)
self.assertEqual(nt.dtype, nt_empty.dtype)
self.assertEqual(nt.device, nt_empty.device)
self.assertEqual(nt.layout, nt_empty.layout)
if torch.cuda.is_available():
if device == "cpu":
nt_cuda = torch.empty_like(nt, device="cuda")
self.assertEqual(torch.device("cuda").type, nt_cuda.device.type)
else:
nt_cpu = torch.empty_like(nt, device="cpu")
self.assertEqual(torch.device("cpu").type, nt_cpu.device.type)
# Check changing dtype of empty_like nested tensor output
dtype_set = {torch.float, torch.float16, torch.double}
for other_dtype in dtype_set - {dtype}:
nt_empty_other_dtype = torch.empty_like(nt, dtype=other_dtype)
self.assertEqual(nt.dtype, dtype)
self.assertEqual(nt_empty_other_dtype.dtype, other_dtype)
self.assertEqual(nt.device, nt_empty.device)
self.assertEqual(nt.layout, nt_empty.layout)
# Create tensor for autograd
nt_empty_req_grad = torch.empty_like(nt, requires_grad=True)
self.assertEqual(nt_empty_req_grad.requires_grad, True)
# Test noncontiguous tensor does not fail to copy
nt_cont, nt_noncont = random_nt_noncontiguous_pair((2, 3, 6, 7))
nt_empty = torch.empty_like(nt_cont)
assert nt_cont.is_same_size(nt_empty)
nt_empty_non_contig = torch.empty_like(nt_noncont)
assert nt_noncont.is_same_size(nt_empty_non_contig)
# Test the contiguous memory format option
nt_empty_contig = torch.empty_like(
nt_cont, memory_format=torch.contiguous_format
)
assert nt_cont.is_same_size(nt_empty_contig)
assert nt_empty_contig.is_contiguous()
nt_empty_non_contig = torch.empty_like(
nt_noncont, memory_format=torch.contiguous_format
)
assert nt_noncont.is_same_size(nt_empty_non_contig)
assert nt_empty_non_contig.is_contiguous()
# Test other memory formats fail
self.assertRaises(
RuntimeError,
lambda: torch.empty_like(nt_cont, memory_format=torch.channels_last),
)
self.assertRaises(
RuntimeError,
lambda: torch.empty_like(nt_noncont, memory_format=torch.channels_last),
)
self.assertRaises(
RuntimeError,
lambda: torch.empty_like(nt_cont, memory_format=torch.channels_last_3d),
)
self.assertRaises(
RuntimeError,
lambda: torch.empty_like(nt_noncont, memory_format=torch.channels_last_3d),
)
@markDynamoStrictTest
|
TestNestedTensorDeviceType
|
python
|
cython__cython
|
Cython/Compiler/Nodes.py
|
{
"start": 19819,
"end": 20422
}
|
class ____(CDeclaratorNode):
# base CDeclaratorNode
child_attrs = ["base"]
def declared_name(self):
return self.base.declared_name()
def analyse_templates(self):
return self.base.analyse_templates()
def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
if base_type.is_pyobject:
error(self.pos, "Pointer base type cannot be a Python object")
ptr_type = PyrexTypes.c_ptr_type(base_type)
return self.base.analyse(ptr_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
|
CPtrDeclaratorNode
|
python
|
huggingface__transformers
|
src/transformers/models/speecht5/modeling_speecht5.py
|
{
"start": 43397,
"end": 45734
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config: SpeechT5Config):
super().__init__()
self.attention = SpeechT5Attention(
embed_dim=config.hidden_size,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=False,
)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.feed_forward = SpeechT5FeedForward(config, config.encoder_ffn_dim)
self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_bias: Optional[torch.Tensor] = None,
output_attentions: bool = False,
):
"""
Args:
hidden_states (`torch.FloatTensor`):
input to the layer of shape `(batch, seq_len, hidden_size)`
attention_mask (`torch.FloatTensor`):
attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
large negative values.
position_bias (`torch.FloatTensor`):
relative position embeddings of size `(seq_len, seq_len, hidden_size // encoder_attention_heads)`
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states, attn_weights = self.attention(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_bias=position_bias,
output_attentions=output_attentions,
)
hidden_states = self.dropout(hidden_states)
hidden_states = residual + hidden_states
hidden_states = self.layer_norm(hidden_states)
hidden_states = hidden_states + self.feed_forward(hidden_states)
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
|
SpeechT5EncoderLayer
|
python
|
kamyu104__LeetCode-Solutions
|
Python/fill-a-special-grid.py
|
{
"start": 746,
"end": 1345
}
|
class ____(object):
def specialGrid(self, n):
"""
:type n: int
:rtype: List[List[int]]
"""
def divide_and_conquer(l, r, c):
if l == 1:
result[r][c] = idx[0]
idx[0] += 1
return
l >>= 1
for dr, dc in ((0, l), (l, 0), (0, -l), (-l, 0)):
r, c = r+dr, c+dc
divide_and_conquer(l, r, c)
total = 1<<n
result = [[0]*total for _ in xrange(total)]
idx = [0]
divide_and_conquer(total, 0, 0)
return result
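# Traced example (illustrative, not part of the original file): for n = 1 the
# cursor visits top-right, bottom-right, bottom-left, top-left in that order,
# so specialGrid(1) returns [[3, 0], [2, 1]].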
|
Solution2
|
python
|
psf__black
|
scripts/release_tests.py
|
{
"start": 257,
"end": 600
}
|
class ____:
"""Used to mock the date to test generating next calver function"""
def today(*args: Any, **kwargs: Any) -> "FakeDateTime": # noqa: B902
return FakeDateTime()
# Add leading 0 on purpose to ensure we remove it
def strftime(*args: Any, **kwargs: Any) -> str: # noqa: B902
return "69.01"
|
FakeDateTime
|
python
|
pytorch__pytorch
|
torch/_inductor/ir.py
|
{
"start": 175720,
"end": 179032
}
|
class ____(TemplateBuffer):
def __init__(
self,
layout: Layout,
inputs: Sequence[IRNode],
make_kernel_render: Optional[Callable[_P, _T]],
mutated_inputs: Optional[Iterable[IRNode]] = None,
allowed_prologue_inps: Optional[OrderedSet[str]] = None,
) -> None:
"""
NOTE:[TritonTemplates with multiple outputs]
We want the ability for TritonTemplates to output multiple tensors. Triton
kernels have no notion of outputs, so this is done by creating tensors that
are then mutated by the kernel. Currently our STORE_OUTPUT codegen doesn't
support creating multinode outputs for triton templates.
We work around this by creating extra input buffers during lowering and
marking them as mutated inputs.
"""
super().__init__(layout, inputs, make_kernel_render)
self.mutated_inputs = mutated_inputs
self.outputs: list[Buffer] = [self]
if mutated_inputs is not None:
# Ensure that the mutated inputs are only allowed for certain nodes
allowed_set = (
torch.ops.higher_order.flex_attention,
torch.ops.higher_order.flex_attention_backward,
)
current_node = V.graph.current_node.target
assert current_node in allowed_set, (
f"Mutated inputs are only allowed for {allowed_set} but got {current_node}"
)
assert isinstance(self.inputs[0], IRNode), type(self.inputs[0])
device = self.inputs[0].get_device()
self.outputs += [
MutationOutput(NoneLayout(device=device), buf, self)
for buf in mutated_inputs
]
self.allowed_prologue_inps = (
allowed_prologue_inps if allowed_prologue_inps else OrderedSet()
)
self.subgraph_inps: Optional[list[Optional[Union[IRNode, sympy.Expr]]]] = None
self.subgraph_outs: Optional[list[Optional[IRNode]]] = None
@cache_on_self_and_args("TritonTemplateBuffer")
def get_free_symbol_uses(
self, unbacked_only: bool = False
) -> OrderedSet[sympy.Symbol]:
res = super().get_free_symbol_uses(unbacked_only)
subgraph_outs = self.subgraph_outs if self.subgraph_outs else []
subgraph_inps = self.subgraph_inps if self.subgraph_inps else []
for inp in subgraph_inps:
if isinstance(inp, sympy.Expr):
res.update(get_free_symbols(inp, unbacked_only))
elif isinstance(inp, IRNode):
res.update(inp.get_free_symbol_uses(unbacked_only))
else:
assert inp is None
for out in subgraph_outs:
if isinstance(out, IRNode):
res.update(out.get_free_symbol_uses(unbacked_only))
else:
assert out is None
return res
def get_outputs(self) -> list[Buffer]:
return self.outputs
def get_allowed_prologue_inps(self) -> OrderedSet[str]:
return self.allowed_prologue_inps
def __str__(self) -> str:
out = f"TritonTemplateBuffer(layout={self.layout})"
return out
PrimitiveInfoType = Union[int, float, bool, str, list[Union[int, str, float, bool]]]
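# Illustrative sketch (hypothetical names, not the inductor API) of the
# mutated-inputs workaround described in the NOTE above: pre-allocate a
# buffer, let the kernel write into it in place, and track it as mutated.
#
#   extra_out = torch.empty_like(primary_out)          # created at lowering
#   triton_kernel[grid](inp, primary_out, extra_out)   # mutates extra_out
#   outputs = [primary_out, extra_out]                 # tracked via MutationOutput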
|
TritonTemplateBuffer
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/operators/test_emr_notebook_execution.py
|
{
"start": 9319,
"end": 14342
}
|
class ____:
@mock.patch("airflow.providers.amazon.aws.hooks.emr.EmrHook.conn")
def test_stop_notebook_execution(self, mock_conn):
mock_conn.stop_notebook_execution.return_value = None
test_execution_id = "test-execution-id"
op = EmrStopNotebookExecutionOperator(
task_id="test-id",
notebook_execution_id=test_execution_id,
)
op.execute(None)
mock_conn.stop_notebook_execution.assert_called_once_with(NotebookExecutionId=test_execution_id)
assert not mock_conn.describe_notebook_execution.called
@mock.patch("botocore.waiter.get_service_module_name", return_value="emr")
@mock.patch.object(EmrHook, "conn")
@mock.patch.object(Waiter, "wait")
def test_stop_notebook_execution_wait_for_completion(self, mock_waiter, mock_conn, _):
mock_conn.stop_notebook_execution.return_value = None
test_execution_id = "test-execution-id"
op = EmrStopNotebookExecutionOperator(
task_id="test-id", notebook_execution_id=test_execution_id, wait_for_completion=True
)
op.execute(None)
mock_conn.stop_notebook_execution.assert_called_once_with(NotebookExecutionId=test_execution_id)
mock_waiter.assert_called_once_with(
mock.ANY, NotebookExecutionId=test_execution_id, WaiterConfig=mock.ANY
)
assert_expected_waiter_type(mock_waiter, "notebook_stopped")
@mock.patch("botocore.waiter.get_service_module_name", return_value="emr")
@mock.patch.object(EmrHook, "conn")
@mock.patch.object(Waiter, "wait")
def test_stop_notebook_execution_wait_for_completion_fail_state(self, mock_waiter, mock_conn, _):
mock_conn.stop_notebook_execution.return_value = None
test_execution_id = "test-execution-id"
op = EmrStopNotebookExecutionOperator(
task_id="test-id", notebook_execution_id=test_execution_id, wait_for_completion=True
)
op.execute(None)
mock_conn.stop_notebook_execution.assert_called_once_with(NotebookExecutionId=test_execution_id)
mock_waiter.assert_called_once_with(
mock.ANY, NotebookExecutionId=test_execution_id, WaiterConfig=mock.ANY
)
assert_expected_waiter_type(mock_waiter, "notebook_stopped")
@mock.patch("botocore.waiter.get_service_module_name", return_value="emr")
@mock.patch("time.sleep", return_value=None)
@mock.patch.object(Waiter, "wait")
@mock.patch.object(EmrHook, "conn")
def test_stop_notebook_execution_wait_for_completion_multiple_attempts(self, mock_conn, mock_waiter, *_):
mock_conn.stop_notebook_execution.return_value = None
test_execution_id = "test-execution-id"
op = EmrStopNotebookExecutionOperator(
task_id="test-id", notebook_execution_id=test_execution_id, wait_for_completion=True
)
op.execute(None)
mock_conn.stop_notebook_execution.assert_called_once_with(NotebookExecutionId=test_execution_id)
mock_waiter.assert_called_once_with(
mock.ANY, NotebookExecutionId=test_execution_id, WaiterConfig=mock.ANY
)
assert_expected_waiter_type(mock_waiter, "notebook_stopped")
@mock.patch("botocore.waiter.get_service_module_name", return_value="emr")
@mock.patch.object(Waiter, "wait")
@mock.patch.object(EmrHook, "conn")
def test_stop_notebook_execution_waiter_config(self, mock_conn, mock_waiter, _):
test_execution_id = "test-execution-id"
waiter_max_attempts = 35
delay = 12
op = EmrStopNotebookExecutionOperator(
task_id="test-id",
notebook_execution_id=test_execution_id,
wait_for_completion=True,
waiter_max_attempts=waiter_max_attempts,
waiter_delay=delay,
)
op.execute(None)
mock_conn.stop_notebook_execution.assert_called_once_with(NotebookExecutionId=test_execution_id)
mock_waiter.assert_called_once_with(
mock.ANY,
NotebookExecutionId=test_execution_id,
WaiterConfig={"Delay": delay, "MaxAttempts": waiter_max_attempts},
)
assert_expected_waiter_type(mock_waiter, "notebook_stopped")
def test_template_fields(self):
op = EmrStartNotebookExecutionOperator(
task_id="test-id",
editor_id=PARAMS["EditorId"],
relative_path=PARAMS["RelativePath"],
cluster_id=PARAMS["ExecutionEngine"]["Id"],
service_role=PARAMS["ServiceRole"],
notebook_execution_name=PARAMS["NotebookExecutionName"],
notebook_params=PARAMS["NotebookParams"],
notebook_instance_security_group_id=PARAMS["NotebookInstanceSecurityGroupId"],
master_instance_security_group_id=PARAMS["ExecutionEngine"]["MasterInstanceSecurityGroupId"],
tags=PARAMS["Tags"],
wait_for_completion=True,
)
validate_template_fields(op)
|
TestStopEmrNotebookExecutionOperator
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-databricks/prefect_databricks/models/jobs.py
|
{
"start": 14385,
"end": 15385
}
|
class ____(str, Enum):
"""
* PENDING: Indicates that a cluster is in the process of being created.
* RUNNING: Indicates that a cluster has been started and is ready for use.
* RESTARTING: Indicates that a cluster is in the process of restarting.
* RESIZING: Indicates that a cluster is in the process of adding or removing nodes.
* TERMINATING: Indicates that a cluster is in the process of being destroyed.
* TERMINATED: Indicates that a cluster has been successfully destroyed.
* ERROR: This state is no longer used. It was used to indicate a cluster that failed to be created. `TERMINATING` and `TERMINATED` are used instead.
* UNKNOWN: Indicates that a cluster is in an unknown state. A cluster should never be in this state.
"""
pending = "PENDING"
running = "RUNNING"
restarting = "RESTARTING"
resizing = "RESIZING"
terminating = "TERMINATING"
terminated = "TERMINATED"
error = "ERROR"
unknown = "UNKNOWN"
|
ClusterState
|
python
|
huggingface__transformers
|
src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
{
"start": 51373,
"end": 54953
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config: SeamlessM4Tv2Config, decoder_ffn_dim=None, decoder_attention_heads=None):
super().__init__()
decoder_ffn_dim = config.decoder_ffn_dim if decoder_ffn_dim is None else decoder_ffn_dim
decoder_attention_heads = (
config.decoder_attention_heads if decoder_attention_heads is None else decoder_attention_heads
)
self.dropout = config.dropout
self.embed_dim = config.hidden_size
self.self_attn = SeamlessM4Tv2Attention(
embed_dim=self.embed_dim,
num_heads=decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.conv1 = nn.Conv1d(self.embed_dim, self.embed_dim, kernel_size=7, stride=1, padding="same")
self.activation_fn = ACT2FN[config.activation_function]
self.conv2 = nn.Conv1d(self.embed_dim, self.embed_dim, kernel_size=7, stride=1, padding="same")
self.conv_layer_norm = nn.LayerNorm(config.hidden_size)
self.conv_dropout = nn.Dropout(self.dropout)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
padding_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
) -> torch.Tensor:
"""
Args:
hidden_states (`torch.FloatTensor`):
input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`):
attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
large negative values.
padding_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked*
or 0 for *masked*
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Conv
residual = hidden_states
# Apply padding mask to avoid leaking padded positions in the convolution layer
if padding_mask is not None:
hidden_states = hidden_states.masked_fill(~padding_mask.bool().unsqueeze(-1), 0.0)
hidden_states = self.conv1(hidden_states.transpose(1, 2)).transpose(1, 2)
if padding_mask is not None:
hidden_states = hidden_states.masked_fill(~padding_mask.bool().unsqueeze(-1), 0.0)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.conv2(hidden_states.transpose(1, 2)).transpose(1, 2)
hidden_states = self.conv_dropout(hidden_states)
hidden_states = residual + hidden_states
hidden_states = self.conv_layer_norm(hidden_states)
return hidden_states, self_attn_weights
############ SUB-MODELS related code ################
@auto_docstring
|
SeamlessM4Tv2TextToUnitDecoderLayer
|
python
|
prabhupant__python-ds
|
data_structures/graphs/number_of_triangles.py
|
{
"start": 315,
"end": 986
}
|
class ____:
def __init__(self, vertices, is_directed):
self.V = vertices
self.graph = [[] for i in range(vertices)]
self.is_directed = is_directed
def add_edge(self, u, v):
self.graph[u].append(v)
self.graph[v].append(u)
def count_triangles(self):
nodes = len(self.graph)
count = 0
for i in range(nodes):
for j in range(nodes):
for k in range(nodes):
if i != j and j != k and k != i and j in self.graph[i] and k in self.graph[j] and i in self.graph[k]:
count += 1
return count // 3 if self.is_directed else count // 6
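# Usage sketch (illustrative, not part of the original file):
# g = Graph(3, is_directed=False)
# g.add_edge(0, 1); g.add_edge(1, 2); g.add_edge(2, 0)
# g.count_triangles()  # -> 1 (six ordered walks // 6 in the undirected case)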
|
Graph
|
python
|
mwaskom__seaborn
|
seaborn/axisgrid.py
|
{
"start": 845,
"end": 3106
}
|
class ____:
"""Base class for grids of subplots."""
def set(self, **kwargs):
"""Set attributes on each subplot Axes."""
for ax in self.axes.flat:
if ax is not None: # Handle removed axes
ax.set(**kwargs)
return self
@property
def fig(self):
"""DEPRECATED: prefer the `figure` property."""
# Grid.figure is preferred because it matches the Axes attribute name.
# But as the maintenance burden of having this property is minimal,
# let's be slow about formally deprecating it. For now just note its deprecation
# in the docstring; add a warning in version 0.13, and eventually remove it.
return self._figure
@property
def figure(self):
"""Access the :class:`matplotlib.figure.Figure` object underlying the grid."""
return self._figure
def apply(self, func, *args, **kwargs):
"""
Pass the grid to a user-supplied function and return self.
The `func` must accept an object of this type for its first
positional argument. Additional arguments are passed through.
The return value of `func` is ignored; this method returns self.
See the `pipe` method if you want the return value.
Added in v0.12.0.
"""
func(self, *args, **kwargs)
return self
def pipe(self, func, *args, **kwargs):
"""
Pass the grid to a user-supplied function and return its value.
The `func` must accept an object of this type for its first
positional argument. Additional arguments are passed through.
The return value of `func` becomes the return value of this method.
See the `apply` method if you want to return self instead.
Added in v0.12.0.
"""
return func(self, *args, **kwargs)
def savefig(self, *args, **kwargs):
"""
Save an image of the plot.
This wraps :meth:`matplotlib.figure.Figure.savefig`, using bbox_inches="tight"
by default. Parameters are passed through to the matplotlib function.
"""
kwargs = kwargs.copy()
kwargs.setdefault("bbox_inches", "tight")
self.figure.savefig(*args, **kwargs)
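# Usage sketch (illustrative): both helpers thread the grid through a callable;
# `apply` returns the grid for chaining, `pipe` returns the callable's value.
# g.apply(lambda grid: grid.set(xlabel="x")).savefig("grid.png")
# n_axes = g.pipe(lambda grid: grid.axes.size)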
|
_BaseGrid
|
python
|
pytorch__pytorch
|
test/dynamo/cpython/3_13/typinganndata/ann_module.py
|
{
"start": 548,
"end": 615
}
|
class ____():
z: int = 5
def __init__(self, x):
pass
|
F
|
python
|
tox-dev__tox
|
src/tox/execute/local_sub_process/read_via_thread.py
|
{
"start": 603,
"end": 1602
}
|
class ____(ABC):
def __init__(self, file_no: int, handler: Callable[[bytes], None], name: str, drain: bool) -> None: # noqa: FBT001
self.file_no = file_no
self.stop = Event()
self.thread = Thread(target=self._read_stream, name=f"tox-r-{name}-{file_no}")
self.handler = handler
self._on_exit_drain = drain
def __enter__(self) -> Self:
self.thread.start()
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
self.stop.set() # signal thread to stop
while self.thread.is_alive(): # wait until it stops
self.thread.join(WAIT_GENERAL)
self._drain_stream() # read anything left
@abstractmethod
def _read_stream(self) -> None:
raise NotImplementedError
@abstractmethod
def _drain_stream(self) -> None:
raise NotImplementedError
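# Usage sketch (hypothetical subclass, not part of the original file):
# class ReadViaThreadStdout(ReadViaThread):
#     def _read_stream(self) -> None: ...   # poll file_no until stop is set
#     def _drain_stream(self) -> None: ...  # read whatever is left behind
# with ReadViaThreadStdout(fd, handler, name="out", drain=True):
#     ...  # handler receives bytes while the wrapped process runs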
|
ReadViaThread
|
python
|
Pylons__pyramid
|
docs/quick_tutorial/databases/tutorial/tests.py
|
{
"start": 478,
"end": 946
}
|
class ____(unittest.TestCase):
def setUp(self):
self.session = _initTestingDB()
self.config = testing.setUp()
def tearDown(self):
self.session.remove()
testing.tearDown()
def test_wiki_view(self):
from tutorial.views import WikiViews
request = testing.DummyRequest()
inst = WikiViews(request)
response = inst.wiki_view()
self.assertEqual(response['title'], 'Wiki View')
|
WikiViewTests
|
python
|
tensorflow__tensorflow
|
tensorflow/python/data/ops/iterator_ops.py
|
{
"start": 37742,
"end": 39256
}
|
class ____(type_spec.TypeSpec):
"""Type specification for `tf.data.Iterator`.
For instance, `tf.data.IteratorSpec` can be used to define a tf.function that
takes `tf.data.Iterator` as an input argument:
>>> @tf.function(input_signature=[tf.data.IteratorSpec(
... tf.TensorSpec(shape=(), dtype=tf.int32, name=None))])
... def square(iterator):
... x = iterator.get_next()
... return x * x
>>> dataset = tf.data.Dataset.from_tensors(5)
>>> iterator = iter(dataset)
>>> print(square(iterator))
tf.Tensor(25, shape=(), dtype=int32)
Attributes:
element_spec: A (nested) structure of `tf.TypeSpec` objects that represents
the type specification of the iterator elements.
"""
__slots__ = ["_element_spec"]
def __init__(self, element_spec):
self._element_spec = element_spec
@property
def value_type(self):
return OwnedIterator
def _serialize(self):
return (self._element_spec,)
@property
def _component_specs(self):
return (tensor.TensorSpec([], dtypes.resource),)
def _to_components(self, value):
return (value._iterator_resource,) # pylint: disable=protected-access
def _from_components(self, components):
return OwnedIterator(
dataset=None,
components=components,
element_spec=self._element_spec)
@staticmethod
def from_value(value):
return IteratorSpec(value.element_spec) # pylint: disable=protected-access
# TODO(b/71645805): Expose trackable stateful objects from dataset.
|
IteratorSpec
|
python
|
sympy__sympy
|
sympy/liealgebras/weyl_group.py
|
{
"start": 178,
"end": 14524
}
|
class ____(Atom):
"""
For each semisimple Lie group, we have a Weyl group. It is a subgroup of
the isometry group of the root system. Specifically, it's the subgroup
that is generated by reflections through the hyperplanes orthogonal to
the roots. Therefore, Weyl groups are reflection groups, and so a Weyl
group is a finite Coxeter group.
"""
def __new__(cls, cartantype):
obj = Atom.__new__(cls)
obj.cartan_type = CartanType(cartantype)
return obj
def generators(self):
"""
This method creates the generating reflections of the Weyl group for
a given Lie algebra. For a Lie algebra of rank n, there are n
different generating reflections. This function returns them as
a list.
Examples
========
>>> from sympy.liealgebras.weyl_group import WeylGroup
>>> c = WeylGroup("F4")
>>> c.generators()
['r1', 'r2', 'r3', 'r4']
"""
n = self.cartan_type.rank()
generators = []
for i in range(1, n+1):
reflection = "r"+str(i)
generators.append(reflection)
return generators
def group_order(self):
"""
This method returns the order of the Weyl group.
For types A, B, C, D, and E the order depends on
the rank of the Lie algebra. For types F and G,
the order is fixed.
Examples
========
>>> from sympy.liealgebras.weyl_group import WeylGroup
>>> c = WeylGroup("D4")
>>> c.group_order()
192.0
"""
n = self.cartan_type.rank()
if self.cartan_type.series == "A":
return fac(n+1)
if self.cartan_type.series in ("B", "C"):
return fac(n)*(2**n)
if self.cartan_type.series == "D":
return fac(n)*(2**(n-1))
if self.cartan_type.series == "E":
if n == 6:
return 51840
if n == 7:
return 2903040
if n == 8:
return 696729600
if self.cartan_type.series == "F":
return 1152
if self.cartan_type.series == "G":
return 12
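# Worked example (illustrative): for D4 the formula above gives
# fac(4) * 2**(4 - 1) = 24 * 8 = 192, matching the docstring example.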
def group_name(self):
"""
This method returns some general information about the Weyl group for
a given Lie algebra. It returns the name of the group and the elements
it acts on, if relevant.
"""
n = self.cartan_type.rank()
if self.cartan_type.series == "A":
return "S"+str(n+1) + ": the symmetric group acting on " + str(n+1) + " elements."
if self.cartan_type.series in ("B", "C"):
return "The hyperoctahedral group acting on " + str(2*n) + " elements."
if self.cartan_type.series == "D":
return "The symmetry group of the " + str(n) + "-dimensional demihypercube."
if self.cartan_type.series == "E":
if n == 6:
return "The symmetry group of the 6-polytope."
if n == 7:
return "The symmetry group of the 7-polytope."
if n == 8:
return "The symmetry group of the 8-polytope."
if self.cartan_type.series == "F":
return "The symmetry group of the 24-cell, or icositetrachoron."
if self.cartan_type.series == "G":
return "D6, the dihedral group of order 12, and symmetry group of the hexagon."
def element_order(self, weylelt):
"""
This method returns the order of a given Weyl group element, which should
be specified by the user in the form of products of the generating
reflections, i.e. of the form r1*r2 etc.
For types A-F, this method currently works by taking the matrix form of
the specified element, and then finding what power of the matrix is the
identity. It then returns this power.
Examples
========
>>> from sympy.liealgebras.weyl_group import WeylGroup
>>> b = WeylGroup("B4")
>>> b.element_order('r1*r4*r2')
4
"""
n = self.cartan_type.rank()
if self.cartan_type.series == "A":
a = self.matrix_form(weylelt)
order = 1
while a != eye(n+1):
a *= self.matrix_form(weylelt)
order += 1
return order
if self.cartan_type.series == "D":
a = self.matrix_form(weylelt)
order = 1
while a != eye(n):
a *= self.matrix_form(weylelt)
order += 1
return order
if self.cartan_type.series == "E":
a = self.matrix_form(weylelt)
order = 1
while a != eye(8):
a *= self.matrix_form(weylelt)
order += 1
return order
if self.cartan_type.series == "G":
elts = list(weylelt)
reflections = elts[1::3]
m = self.delete_doubles(reflections)
while self.delete_doubles(m) != m:
m = self.delete_doubles(m)
reflections = m
if len(reflections) % 2 == 1:
return 2
elif len(reflections) == 0:
return 1
else:
if len(reflections) == 1:
return 2
else:
m = len(reflections) // 2
lcm = (6 * m) / igcd(m, 6)
order = lcm / m
return order
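# (Illustrative trace: 'r1*r2' leaves two distinct reflections, so m = 1,
# lcm = 6 * 1 / igcd(1, 6) = 6 and the returned order is 6 / 1 = 6.)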
if self.cartan_type.series == 'F':
a = self.matrix_form(weylelt)
order = 1
while a != eye(4):
a *= self.matrix_form(weylelt)
order += 1
return order
if self.cartan_type.series in ("B", "C"):
a = self.matrix_form(weylelt)
order = 1
while a != eye(n):
a *= self.matrix_form(weylelt)
order += 1
return order
def delete_doubles(self, reflections):
"""
This is a helper method for determining the order of an element in the
Weyl group of G2. It takes a Weyl group element and deletes any repeated
simple reflections in it.
"""
copy = list(reflections)
for counter, elt in enumerate(copy):
if counter < len(copy)-1:
if copy[counter + 1] == elt:
del copy[counter]
del copy[counter]
return copy
def matrix_form(self, weylelt):
"""
This method takes input from the user in the form of products of the
generating reflections, and returns the matrix corresponding to the
element of the Weyl group. Since each element of the Weyl group is
a reflection of some type, there is a corresponding matrix representation.
This method uses the standard representation for all the generating
reflections.
Examples
========
>>> from sympy.liealgebras.weyl_group import WeylGroup
>>> f = WeylGroup("F4")
>>> f.matrix_form('r2*r3')
Matrix([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, -1],
[0, 0, 1, 0]])
"""
elts = list(weylelt)
reflections = elts[1::3]
n = self.cartan_type.rank()
if self.cartan_type.series == 'A':
matrixform = eye(n+1)
for elt in reflections:
a = int(elt)
mat = eye(n+1)
mat[a-1, a-1] = 0
mat[a-1, a] = 1
mat[a, a-1] = 1
mat[a, a] = 0
matrixform *= mat
return matrixform
if self.cartan_type.series == 'D':
matrixform = eye(n)
for elt in reflections:
a = int(elt)
mat = eye(n)
if a < n:
mat[a-1, a-1] = 0
mat[a-1, a] = 1
mat[a, a-1] = 1
mat[a, a] = 0
matrixform *= mat
else:
mat[n-2, n-1] = -1
mat[n-2, n-2] = 0
mat[n-1, n-2] = -1
mat[n-1, n-1] = 0
matrixform *= mat
return matrixform
if self.cartan_type.series == 'G':
matrixform = eye(3)
for elt in reflections:
a = int(elt)
if a == 1:
gen1 = Matrix([[1, 0, 0], [0, 0, 1], [0, 1, 0]])
matrixform *= gen1
else:
gen2 = Matrix([[Rational(2, 3), Rational(2, 3), Rational(-1, 3)],
[Rational(2, 3), Rational(-1, 3), Rational(2, 3)],
[Rational(-1, 3), Rational(2, 3), Rational(2, 3)]])
matrixform *= gen2
return matrixform
if self.cartan_type.series == 'F':
matrixform = eye(4)
for elt in reflections:
a = int(elt)
if a == 1:
mat = Matrix([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
matrixform *= mat
elif a == 2:
mat = Matrix([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
matrixform *= mat
elif a == 3:
mat = Matrix([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, -1]])
matrixform *= mat
else:
mat = Matrix([[Rational(1, 2), Rational(1, 2), Rational(1, 2), Rational(1, 2)],
[Rational(1, 2), Rational(1, 2), Rational(-1, 2), Rational(-1, 2)],
[Rational(1, 2), Rational(-1, 2), Rational(1, 2), Rational(-1, 2)],
[Rational(1, 2), Rational(-1, 2), Rational(-1, 2), Rational(1, 2)]])
matrixform *= mat
return matrixform
if self.cartan_type.series == 'E':
matrixform = eye(8)
for elt in reflections:
a = int(elt)
if a == 1:
mat = Matrix([[Rational(3, 4), Rational(1, 4), Rational(1, 4), Rational(1, 4),
Rational(1, 4), Rational(1, 4), Rational(1, 4), Rational(-1, 4)],
[Rational(1, 4), Rational(3, 4), Rational(-1, 4), Rational(-1, 4),
Rational(-1, 4), Rational(-1, 4), Rational(1, 4), Rational(-1, 4)],
[Rational(1, 4), Rational(-1, 4), Rational(3, 4), Rational(-1, 4),
Rational(-1, 4), Rational(-1, 4), Rational(-1, 4), Rational(1, 4)],
[Rational(1, 4), Rational(-1, 4), Rational(-1, 4), Rational(3, 4),
Rational(-1, 4), Rational(-1, 4), Rational(-1, 4), Rational(1, 4)],
[Rational(1, 4), Rational(-1, 4), Rational(-1, 4), Rational(-1, 4),
Rational(3, 4), Rational(-1, 4), Rational(-1, 4), Rational(1, 4)],
[Rational(1, 4), Rational(-1, 4), Rational(-1, 4), Rational(-1, 4),
Rational(-1, 4), Rational(3, 4), Rational(-1, 4), Rational(1, 4)],
[Rational(1, 4), Rational(-1, 4), Rational(-1, 4), Rational(-1, 4),
Rational(-1, 4), Rational(-1, 4), Rational(-3, 4), Rational(1, 4)],
[Rational(1, 4), Rational(-1, 4), Rational(-1, 4), Rational(-1, 4),
Rational(-1, 4), Rational(-1, 4), Rational(-1, 4), Rational(3, 4)]])
matrixform *= mat
elif a == 2:
mat = eye(8)
mat[0, 0] = 0
mat[0, 1] = -1
mat[1, 0] = -1
mat[1, 1] = 0
matrixform *= mat
else:
mat = eye(8)
mat[a-3, a-3] = 0
mat[a-3, a-2] = 1
mat[a-2, a-3] = 1
mat[a-2, a-2] = 0
matrixform *= mat
return matrixform
if self.cartan_type.series in ("B", "C"):
matrixform = eye(n)
for elt in reflections:
a = int(elt)
mat = eye(n)
if a == 1:
mat[0, 0] = -1
matrixform *= mat
else:
mat[a - 2, a - 2] = 0
mat[a - 2, a - 1] = 1
mat[a - 1, a - 2] = 1
mat[a - 1, a - 1] = 0
matrixform *= mat
return matrixform
def coxeter_diagram(self):
"""
This method returns the Coxeter diagram corresponding to a Weyl group.
The Coxeter diagram can be obtained from a Lie algebra's Dynkin diagram
by deleting all arrows; the Coxeter diagram is the undirected graph.
The vertices of the Coxeter diagram represent the generating reflections
of the Weyl group, $s_i$. An edge is drawn between $s_i$ and $s_j$ if the order
$m(i, j)$ of $s_is_j$ is greater than two. If there is one edge, the order
$m(i, j)$ is 3. If there are two edges, the order $m(i, j)$ is 4, and if there
are three edges, the order $m(i, j)$ is 6.
Examples
========
>>> from sympy.liealgebras.weyl_group import WeylGroup
>>> c = WeylGroup("B3")
>>> print(c.coxeter_diagram())
0---0===0
1 2 3
"""
n = self.cartan_type.rank()
if self.cartan_type.series in ("A", "D", "E"):
return self.cartan_type.dynkin_diagram()
if self.cartan_type.series in ("B", "C"):
diag = "---".join("0" for i in range(1, n)) + "===0\n"
diag += " ".join(str(i) for i in range(1, n+1))
return diag
if self.cartan_type.series == "F":
diag = "0---0===0---0\n"
diag += " ".join(str(i) for i in range(1, 5))
return diag
if self.cartan_type.series == "G":
diag = "0≡≡≡0\n1 2"
return diag
|
WeylGroup
|
python
|
pytorch__pytorch
|
torch/nn/modules/rnn.py
|
{
"start": 62713,
"end": 66931
}
|
class ____(RNNCellBase):
r"""An Elman RNN cell with tanh or ReLU non-linearity.
.. math::
h' = \tanh(W_{ih} x + b_{ih} + W_{hh} h + b_{hh})
If :attr:`nonlinearity` is `'relu'`, then ReLU is used in place of tanh.
Args:
input_size: The number of expected features in the input `x`
hidden_size: The number of features in the hidden state `h`
bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
Default: ``True``
nonlinearity: The non-linearity to use. Can be either ``'tanh'`` or ``'relu'``. Default: ``'tanh'``
Inputs: input, hidden
- **input**: tensor containing input features
- **hidden**: tensor containing the initial hidden state
Defaults to zero if not provided.
Outputs: h'
- **h'** of shape `(batch, hidden_size)`: tensor containing the next hidden state
for each element in the batch
Shape:
- input: :math:`(N, H_{in})` or :math:`(H_{in})` tensor containing input features where
:math:`H_{in}` = `input_size`.
- hidden: :math:`(N, H_{out})` or :math:`(H_{out})` tensor containing the initial hidden
state where :math:`H_{out}` = `hidden_size`. Defaults to zero if not provided.
- output: :math:`(N, H_{out})` or :math:`(H_{out})` tensor containing the next hidden state.
Attributes:
weight_ih: the learnable input-hidden weights, of shape
`(hidden_size, input_size)`
weight_hh: the learnable hidden-hidden weights, of shape
`(hidden_size, hidden_size)`
bias_ih: the learnable input-hidden bias, of shape `(hidden_size)`
bias_hh: the learnable hidden-hidden bias, of shape `(hidden_size)`
.. note::
All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
where :math:`k = \frac{1}{\text{hidden\_size}}`
Examples::
>>> rnn = nn.RNNCell(10, 20)
>>> input = torch.randn(6, 3, 10)
>>> hx = torch.randn(3, 20)
>>> output = []
>>> for i in range(6):
... hx = rnn(input[i], hx)
... output.append(hx)
"""
__constants__ = ["input_size", "hidden_size", "bias", "nonlinearity"]
nonlinearity: str
def __init__(
self,
input_size: int,
hidden_size: int,
bias: bool = True,
nonlinearity: str = "tanh",
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__(input_size, hidden_size, bias, num_chunks=1, **factory_kwargs)
self.nonlinearity = nonlinearity
def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
if input.dim() not in (1, 2):
raise ValueError(
f"RNNCell: Expected input to be 1D or 2D, got {input.dim()}D instead"
)
if hx is not None and hx.dim() not in (1, 2):
raise ValueError(
f"RNNCell: Expected hidden to be 1D or 2D, got {hx.dim()}D instead"
)
is_batched = input.dim() == 2
if not is_batched:
input = input.unsqueeze(0)
if hx is None:
hx = torch.zeros(
input.size(0), self.hidden_size, dtype=input.dtype, device=input.device
)
else:
hx = hx.unsqueeze(0) if not is_batched else hx
if self.nonlinearity == "tanh":
ret = _VF.rnn_tanh_cell(
input,
hx,
self.weight_ih,
self.weight_hh,
self.bias_ih,
self.bias_hh,
)
elif self.nonlinearity == "relu":
ret = _VF.rnn_relu_cell(
input,
hx,
self.weight_ih,
self.weight_hh,
self.bias_ih,
self.bias_hh,
)
else:
ret = input # TODO: remove when jit supports exception flow
raise RuntimeError(f"Unknown nonlinearity: {self.nonlinearity}")
if not is_batched:
ret = ret.squeeze(0)
return ret
|
RNNCell
|
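Note: the fused _VF.rnn_tanh_cell call above computes exactly the docstring's update rule; a sketch with plain tensor ops (the tolerance is an assumption, since the fused kernel may differ at float precision):

import torch

def rnn_tanh_cell_reference(x, h, w_ih, w_hh, b_ih, b_hh):
    # h' = tanh(W_ih x + b_ih + W_hh h + b_hh), as in the docstring
    return torch.tanh(x @ w_ih.t() + b_ih + h @ w_hh.t() + b_hh)

cell = torch.nn.RNNCell(10, 20)
x, h = torch.randn(3, 10), torch.randn(3, 20)
manual = rnn_tanh_cell_reference(x, h, cell.weight_ih, cell.weight_hh,
                                 cell.bias_ih, cell.bias_hh)
assert torch.allclose(cell(x, h), manual, atol=1e-6)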
python
|
pytorch__pytorch
|
test/distributed/checkpoint/test_fsspec.py
|
{
"start": 2554,
"end": 6239
}
|
class ____(ShardedTensorTestBase):
@property
def world_size(self) -> int:
return 2
@with_comms(backend=BACKEND, init_rpc=False)
@requires_accelerator_dist_backend()
@skip_if_lt_x_gpu(2)
@with_temp_dir
def test_fsspec(self):
CHECKPOINT_DIR = self.temp_dir
model = FSDP(MyTestModule().to(device_type))
optim = torch.optim.Adam(model.parameters(), lr=0.1)
model(torch.rand(8, 8, device=dist.get_rank())).sum().backward()
optim.step()
with FSDP.state_dict_type(model, StateDictType.SHARDED_STATE_DICT):
state_dict = {
"model": model.state_dict(),
"optim": FSDP.optim_state_dict(model, optim),
}
dcp.save(
state_dict=state_dict,
storage_writer=FsspecWriter(CHECKPOINT_DIR),
planner=dcp.DefaultSavePlanner(),
)
model_2 = FSDP(MyTestModule().to(device_type))
optim_2 = torch.optim.Adam(model_2.parameters(), lr=0.1)
with FSDP.summon_full_params(model):
with FSDP.summon_full_params(model_2):
for n_p1, n_p2 in zip(
model.named_parameters(), model_2.named_parameters()
):
self.assertNotEqual(n_p1[1], n_p2[1])
# now load the model and ensure the values are the same
with FSDP.state_dict_type(model_2, StateDictType.SHARDED_STATE_DICT):
state_dict = {
"model": model_2.state_dict(),
}
dcp.load(
state_dict=state_dict,
storage_reader=FsspecReader(CHECKPOINT_DIR),
planner=dcp.DefaultLoadPlanner(),
)
model_2.load_state_dict(state_dict["model"])
optim_state = load_sharded_optimizer_state_dict(
model_state_dict=state_dict["model"],
optimizer_key="optim",
storage_reader=FsspecReader(CHECKPOINT_DIR),
)
flattened_osd = FSDP.optim_state_dict_to_load(
model_2, optim_2, optim_state["optim"]
)
optim_2.load_state_dict(flattened_osd)
with FSDP.summon_full_params(model):
with FSDP.summon_full_params(model_2):
for n_p1, n_p2 in zip(
model.named_parameters(), model_2.named_parameters()
):
self.assertEqual(n_p1[1], n_p2[1])
def opt_at(opt, idx):
return list(iter(opt.state.values()))[idx]
# Adam lazily creates its state
self.assertEqual(opt_at(optim, 0)["exp_avg"], opt_at(optim_2, 0)["exp_avg"])
self.assertEqual(
opt_at(optim, 0)["exp_avg_sq"], opt_at(optim_2, 0)["exp_avg_sq"]
)
@with_comms(backend=BACKEND, init_rpc=False)
@requires_accelerator_dist_backend()
@skip_if_lt_x_gpu(2)
@with_temp_dir
def test_overwrite(self):
t1, t2 = torch.randn(10), torch.randn(10)
dcp.save(
{"random": t1}, storage_writer=FsspecWriter(self.temp_dir, overwrite=False)
)
dcp.save(
{"random": t2}, storage_writer=FsspecWriter(self.temp_dir, overwrite=True)
)
sd = {"random": torch.zeros(10)}
dcp.load(sd, checkpoint_id=self.temp_dir)
self.assertTrue(torch.allclose(sd["random"], t2))
with self.assertRaisesRegex(
CheckpointException, ".*Checkpoint already exists.*"
):
dcp.save(
{"random": t2},
storage_writer=FsspecWriter(self.temp_dir, overwrite=False),
)
|
TestFSSpec
|
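Note: stripped of FSDP and the two-rank setup, the round trip the test exercises reduces to a few calls. A single-process sketch, assuming a PyTorch version that exports the fsspec writer/reader from torch.distributed.checkpoint and falls back to non-distributed mode when no process group is initialized:

import torch
import torch.distributed.checkpoint as dcp
from torch.distributed.checkpoint import FsspecReader, FsspecWriter  # assumed export path

state = {"weights": torch.randn(4, 4)}
dcp.save(state_dict=state, storage_writer=FsspecWriter("/tmp/ckpt"))

restored = {"weights": torch.zeros(4, 4)}  # load() fills the tensors in place
dcp.load(restored, storage_reader=FsspecReader("/tmp/ckpt"))
assert torch.equal(state["weights"], restored["weights"])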
python
|
openai__openai-python
|
src/openai/types/evals/create_eval_completions_run_data_source.py
|
{
"start": 4783,
"end": 5304
}
|
class ____(BaseModel):
item_reference: str
"""A reference to a variable in the `item` namespace. Ie, "item.input_trajectory" """
type: Literal["item_reference"]
"""The type of input messages. Always `item_reference`."""
InputMessages: TypeAlias = Annotated[
Union[InputMessagesTemplate, InputMessagesItemReference], PropertyInfo(discriminator="type")
]
SamplingParamsResponseFormat: TypeAlias = Union[ResponseFormatText, ResponseFormatJSONSchema, ResponseFormatJSONObject]
|
InputMessagesItemReference
|
python
|
ApeWorX__ape
|
src/ape/exceptions.py
|
{
"start": 14932,
"end": 15198
}
|
class ____(ProjectError, AttributeError):
"""
Raised when trying to access items via ``.`` access.
"""
def __init__(self, msg: str, base_err: Optional[Exception] = None):
self.base_err = base_err
super().__init__(msg)
|
ApeAttributeError
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 673166,
"end": 673527
}
|
class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("client_mutation_id", "project_next")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
project_next = sgqlc.types.Field("ProjectNext", graphql_name="projectNext")
|
UpdateProjectNextPayload
|
python
|
skorch-dev__skorch
|
skorch/callbacks/training.py
|
{
"start": 22553,
"end": 23039
}
|
class ____(ParamMapper):
"""Freeze matching parameters at the start of the first epoch. You may
specify a specific point in time (either by epoch number or using a
callable) when the parameters are frozen using the ``at`` parameter.
See :class:`.ParamMapper` for details.
"""
def __init__(self, *args, **kwargs):
kwargs['at'] = kwargs.get('at', 1)
kwargs['fn'] = kwargs.get('fn', freeze_parameter)
super().__init__(*args, **kwargs)
|
Freezer
|
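Note: because `at` defaults to 1 and `fn` to `freeze_parameter`, attaching the callback with just a parameter-name pattern freezes the matching weights at the start of training. A hedged usage sketch (the module and pattern below are illustrative):

import torch
from skorch import NeuralNetClassifier
from skorch.callbacks import Freezer

class TinyNet(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.embedding = torch.nn.Linear(20, 10)
        self.head = torch.nn.Linear(10, 2)

    def forward(self, x):
        return torch.softmax(self.head(self.embedding(x)), dim=-1)

# Freezes embedding.weight and embedding.bias at the start of epoch 1.
net = NeuralNetClassifier(TinyNet, callbacks=[Freezer('embedding.*')])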
python
|
ray-project__ray
|
doc/source/serve/doc_code/monitoring/request_id.py
|
{
"start": 59,
"end": 265
}
|
class ____:
def __call__(self) -> int:
return 1
serve.run(Model.bind())
resp = requests.get("http://localhost:8000", headers={"X-Request-ID": "123-234"})
print(resp.headers["X-Request-ID"])
|
Model
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/classes/config.py
|
{
"start": 57101,
"end": 57251
}
|
class ____:
skip: bool
vectorize_property_name: bool
PropertyVectorizerConfig = _PropertyVectorizerConfig
@dataclass
|
_PropertyVectorizerConfig
|
python
|
redis__redis-py
|
redis/cache.py
|
{
"start": 1564,
"end": 2086
}
|
class ____(ABC):
@property
@abstractmethod
def cache(self):
pass
@cache.setter
@abstractmethod
def cache(self, value):
pass
@property
@abstractmethod
def type(self) -> EvictionPolicyType:
pass
@abstractmethod
def evict_next(self) -> CacheKey:
pass
@abstractmethod
def evict_many(self, count: int) -> List[CacheKey]:
pass
@abstractmethod
def touch(self, cache_key: CacheKey) -> None:
pass
|
EvictionPolicyInterface
|
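Note: the contract the ABC above encodes (touch on access, evict the stalest key on pressure) is easiest to see in a standalone LRU sketch. This is not redis-py's implementation, and it ignores the `cache`/`type` plumbing:

from collections import OrderedDict

class SketchLRU:
    def __init__(self):
        self._order = OrderedDict()  # keys in least- to most-recently-used order

    def touch(self, cache_key) -> None:
        self._order[cache_key] = None
        self._order.move_to_end(cache_key)  # mark as most recently used

    def evict_next(self):
        key, _ = self._order.popitem(last=False)  # drop the least recently used
        return key

    def evict_many(self, count):
        return [self.evict_next() for _ in range(min(count, len(self._order)))]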
python
|
eventlet__eventlet
|
eventlet/green/zmq.py
|
{
"start": 7058,
"end": 18018
}
|
class ____(_Socket):
"""Green version of :class:``zmq.core.socket.Socket``.
The following three methods are always overridden:
* send
* recv
* getsockopt
to ensure that the ``zmq.NOBLOCK`` flag is set and that sending or receiving
is deferred to the hub (using :func:`eventlet.hubs.trampoline`) if a
``zmq.EAGAIN`` (retry) error is raised.
For some socket types, the following methods are also overridden:
* send_multipart
* recv_multipart
"""
def __init__(self, context, socket_type):
super().__init__(context, socket_type)
self.__dict__['_eventlet_send_event'] = _BlockedThread()
self.__dict__['_eventlet_recv_event'] = _BlockedThread()
self.__dict__['_eventlet_send_lock'] = _QueueLock()
self.__dict__['_eventlet_recv_lock'] = _QueueLock()
def event(fd):
# Some events arrived at the zmq socket. This may mean
# there's a message that can be read or there's space for
# a message to be written.
send_wake = self._eventlet_send_event.wake()
recv_wake = self._eventlet_recv_event.wake()
if not send_wake and not recv_wake:
# if no waiting send or recv thread was woken up, then
# force the zmq socket's events to be processed to
# avoid repeated wakeups
_Socket_getsockopt(self, EVENTS)
hub = eventlet.hubs.get_hub()
self.__dict__['_eventlet_listener'] = hub.add(hub.READ,
self.getsockopt(FD),
event,
lambda _: None,
lambda: None)
self.__dict__['_eventlet_clock'] = hub.clock
@_wraps(_Socket.close)
def close(self, linger=None):
super().close(linger)
if self._eventlet_listener is not None:
eventlet.hubs.get_hub().remove(self._eventlet_listener)
self.__dict__['_eventlet_listener'] = None
# wake any blocked threads
self._eventlet_send_event.wake()
self._eventlet_recv_event.wake()
@_wraps(_Socket.getsockopt)
def getsockopt(self, option):
result = _Socket_getsockopt(self, option)
if option == EVENTS:
# Getting the events causes the zmq socket to process
# events which may mean a msg can be sent or received. If
# there is a greenthread blocked and waiting for events,
# it will miss the edge-triggered read event, so wake it
# up.
if (result & POLLOUT):
self._eventlet_send_event.wake()
if (result & POLLIN):
self._eventlet_recv_event.wake()
return result
@_wraps(_Socket.send)
def send(self, msg, flags=0, copy=True, track=False):
"""A send method that's safe to use when multiple greenthreads
are calling send, send_multipart, recv and recv_multipart on
the same socket.
"""
if flags & NOBLOCK:
result = _Socket_send(self, msg, flags, copy, track)
# Instead of calling both wake methods, could call
# self.getsockopt(EVENTS) which would trigger wakeups if
# needed.
self._eventlet_send_event.wake()
self._eventlet_recv_event.wake()
return result
# TODO: pyzmq will copy the message buffer and create Message
# objects under some circumstances. We could do that work here
# once to avoid doing it every time the send is retried.
flags |= NOBLOCK
with self._eventlet_send_lock:
while True:
try:
return _Socket_send(self, msg, flags, copy, track)
except ZMQError as e:
if e.errno == EAGAIN:
self._eventlet_send_event.block()
else:
raise
finally:
# The call to send processes 0mq events and may
# make the socket ready to recv. Wake the next
# receiver. (Could check EVENTS for POLLIN here)
self._eventlet_recv_event.wake()
@_wraps(_Socket.send_multipart)
def send_multipart(self, msg_parts, flags=0, copy=True, track=False):
"""A send_multipart method that's safe to use when multiple
greenthreads are calling send, send_multipart, recv and
recv_multipart on the same socket.
"""
if flags & NOBLOCK:
return _Socket_send_multipart(self, msg_parts, flags, copy, track)
# acquire lock here so the subsequent calls to send for the
# message parts after the first don't block
with self._eventlet_send_lock:
return _Socket_send_multipart(self, msg_parts, flags, copy, track)
@_wraps(_Socket.send_string)
def send_string(self, u, flags=0, copy=True, encoding='utf-8'):
"""A send_string method that's safe to use when multiple
greenthreads are calling send, send_string, recv and
recv_string on the same socket.
"""
if flags & NOBLOCK:
return _Socket_send_string(self, u, flags, copy, encoding)
# acquire lock here so the subsequent calls to send for the
# message parts after the first don't block
with self._eventlet_send_lock:
return _Socket_send_string(self, u, flags, copy, encoding)
@_wraps(_Socket.send_pyobj)
def send_pyobj(self, obj, flags=0, protocol=2):
"""A send_pyobj method that's safe to use when multiple
greenthreads are calling send, send_pyobj, recv and
recv_pyobj on the same socket.
"""
if flags & NOBLOCK:
return _Socket_send_pyobj(self, obj, flags, protocol)
# acquire lock here so the subsequent calls to send for the
# message parts after the first don't block
with self._eventlet_send_lock:
return _Socket_send_pyobj(self, obj, flags, protocol)
@_wraps(_Socket.send_json)
def send_json(self, obj, flags=0, **kwargs):
"""A send_json method that's safe to use when multiple
greenthreads are calling send, send_json, recv and
recv_json on the same socket.
"""
if flags & NOBLOCK:
return _Socket_send_json(self, obj, flags, **kwargs)
# acquire lock here so the subsequent calls to send for the
# message parts after the first don't block
with self._eventlet_send_lock:
return _Socket_send_json(self, obj, flags, **kwargs)
@_wraps(_Socket.recv)
def recv(self, flags=0, copy=True, track=False):
"""A recv method that's safe to use when multiple greenthreads
are calling send, send_multipart, recv and recv_multipart on
the same socket.
"""
if flags & NOBLOCK:
msg = _Socket_recv(self, flags, copy, track)
# Instead of calling both wake methods, could call
# self.getsockopt(EVENTS) which would trigger wakeups if
# needed.
self._eventlet_send_event.wake()
self._eventlet_recv_event.wake()
return msg
deadline = None
if hasattr(__zmq__, 'RCVTIMEO'):
sock_timeout = self.getsockopt(__zmq__.RCVTIMEO)
if sock_timeout == -1:
pass
elif sock_timeout > 0:
deadline = self._eventlet_clock() + sock_timeout / 1000.0
else:
raise ValueError(sock_timeout)
flags |= NOBLOCK
with self._eventlet_recv_lock:
while True:
try:
return _Socket_recv(self, flags, copy, track)
except ZMQError as e:
if e.errno == EAGAIN:
# zmq in its wisdom decided to reuse EAGAIN for timeouts
if deadline is not None and self._eventlet_clock() > deadline:
e.is_timeout = True
raise
self._eventlet_recv_event.block(deadline=deadline)
else:
raise
finally:
# The call to recv processes 0mq events and may
# make the socket ready to send. Wake the next
# receiver. (Could check EVENTS for POLLOUT here)
self._eventlet_send_event.wake()
@_wraps(_Socket.recv_multipart)
def recv_multipart(self, flags=0, copy=True, track=False):
"""A recv_multipart method that's safe to use when multiple
greenthreads are calling send, send_multipart, recv and
recv_multipart on the same socket.
"""
if flags & NOBLOCK:
return _Socket_recv_multipart(self, flags, copy, track)
# acquire lock here so the subsequent calls to recv for the
# message parts after the first don't block
with self._eventlet_recv_lock:
return _Socket_recv_multipart(self, flags, copy, track)
@_wraps(_Socket.recv_string)
def recv_string(self, flags=0, encoding='utf-8'):
"""A recv_string method that's safe to use when multiple
greenthreads are calling send, send_string, recv and
recv_string on the same socket.
"""
if flags & NOBLOCK:
return _Socket_recv_string(self, flags, encoding)
# acquire lock here so the subsequent calls to recv for the
# message parts after the first don't block
with self._eventlet_recv_lock:
return _Socket_recv_string(self, flags, encoding)
@_wraps(_Socket.recv_json)
def recv_json(self, flags=0, **kwargs):
"""A recv_json method that's safe to use when multiple
greenthreads are calling send, send_json, recv and
recv_json on the same socket.
"""
if flags & NOBLOCK:
return _Socket_recv_json(self, flags, **kwargs)
# acquire lock here so the subsequent calls to recv for the
# message parts after the first don't block
with self._eventlet_recv_lock:
return _Socket_recv_json(self, flags, **kwargs)
@_wraps(_Socket.recv_pyobj)
def recv_pyobj(self, flags=0):
"""A recv_pyobj method that's safe to use when multiple
greenthreads are calling send, send_pyobj, recv and
recv_pyobj on the same socket.
"""
if flags & NOBLOCK:
return _Socket_recv_pyobj(self, flags)
# acquire lock here so the subsequent calls to recv for the
# message parts after the first don't block
with self._eventlet_recv_lock:
return _Socket_recv_pyobj(self, flags)
|
Socket
|
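Note: every green send/recv above repeats one pattern: force NOBLOCK, and on EAGAIN park the greenthread until the hub's FD event wakes it. Distilled into a sketch (the `blocked_thread` argument stands in for the module's _BlockedThread helper):

import zmq

def green_send(sock, msg, blocked_thread):
    while True:
        try:
            return sock.send(msg, flags=zmq.NOBLOCK)
        except zmq.ZMQError as e:
            if e.errno != zmq.EAGAIN:
                raise
            blocked_thread.block()  # yield to the hub until the socket is writable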
python
|
PyCQA__pylint
|
tests/functional/u/unsupported/unsupported_binary_operation.py
|
{
"start": 1738,
"end": 1780
}
|
class ____(Unknown):
pass
Base() * 23
|
Base
|
python
|
spyder-ide__spyder
|
spyder/plugins/editor/widgets/splitter.py
|
{
"start": 819,
"end": 11682
}
|
class ____(QSplitter, SpyderWidgetMixin):
"""QSplitter for editor windows."""
CONF_SECTION = "editor"
def __init__(self, parent, main_widget, menu_actions, first=False,
register_editorstack_cb=None, unregister_editorstack_cb=None,
use_switcher=True):
"""Create a splitter for dividing an editor window into panels.
Adds a new EditorStack instance to this splitter. If it's not
the first splitter, clones the current EditorStack from the
EditorMainWidget.
Args:
parent: Parent widget.
main_widget: PluginMainWidget this widget belongs to.
menu_actions: QActions to include from the parent.
first: Boolean if this is the first splitter in the editor.
register_editorstack_cb: Callback to register the EditorStack.
Defaults to main_widget.register_editorstack() to
register the EditorStack with the EditorMainWidget.
unregister_editorstack_cb: Callback to unregister the EditorStack.
Defaults to main_widget.unregister_editorstack() to
unregister the EditorStack with the EditorMainWidget.
"""
if not PYSIDE2:
super().__init__(parent, class_parent=main_widget)
else:
QSplitter.__init__(self, parent)
SpyderWidgetMixin.__init__(self, class_parent=main_widget)
self.setAttribute(Qt.WA_DeleteOnClose)
self.setChildrenCollapsible(False)
self.toolbar_list = None
self.menu_list = None
self.main_widget = main_widget
if register_editorstack_cb is None:
register_editorstack_cb = self.main_widget.register_editorstack
self.register_editorstack_cb = register_editorstack_cb
if unregister_editorstack_cb is None:
unregister_editorstack_cb = self.main_widget.unregister_editorstack
self.unregister_editorstack_cb = unregister_editorstack_cb
self.menu_actions = menu_actions
self.editorstack = EditorStack(self, menu_actions, use_switcher)
self.register_editorstack_cb(self.editorstack)
if not first:
self.main_widget.clone_editorstack(editorstack=self.editorstack)
self.editorstack.destroyed.connect(self.editorstack_closed)
self.editorstack.sig_split_vertically.connect(
lambda: self.split(orientation=Qt.Vertical))
self.editorstack.sig_split_horizontally.connect(
lambda: self.split(orientation=Qt.Horizontal))
self.addWidget(self.editorstack)
if not running_under_pytest():
self.editorstack.set_color_scheme(main_widget._get_color_scheme())
self.setStyleSheet(self._stylesheet)
def closeEvent(self, event):
"""Override QWidget closeEvent().
This event handler is called with the given event when Qt
receives a window close request from a top-level widget.
"""
QSplitter.closeEvent(self, event)
def __give_focus_to_remaining_editor(self):
focus_widget = self.main_widget.get_focus_widget()
if focus_widget is not None:
focus_widget.setFocus()
@Slot()
def editorstack_closed(self):
logger.debug("Closing EditorStack")
try:
self.unregister_editorstack_cb(self.editorstack)
self.editorstack = None
close_splitter = self.count() == 1
if close_splitter:
# editorstack just closed was the last widget in this QSplitter
self.close()
return
self.__give_focus_to_remaining_editor()
except (RuntimeError, AttributeError):
# editorsplitter has been destroyed (happens when closing a
# EditorMainWindow instance)
return
def editorsplitter_closed(self):
logger.debug("Closing EditorSplitter")
try:
close_splitter = self.count() == 1 and self.editorstack is None
except RuntimeError:
# editorsplitter has been destroyed (happens when closing a
# EditorMainWindow instance)
return
if close_splitter:
# editorsplitter just closed was the last widget in this QSplitter
self.close()
return
elif self.count() == 2 and self.editorstack:
# back to the initial state: a single editorstack instance,
# as a single widget in this QSplitter: orientation may be changed
self.editorstack.reset_orientation()
self.__give_focus_to_remaining_editor()
def split(self, orientation=Qt.Vertical):
"""
Create and attach a new EditorSplitter to the current EditorSplitter.
The new EditorSplitter widget will contain an EditorStack that
is a clone of the current EditorStack.
A single EditorSplitter instance can be split multiple times, but the
orientation will be the same for all the direct splits. If one of
the child splits is split, then that split can have a different
orientation.
"""
logger.debug("Create a new EditorSplitter")
self.setOrientation(orientation)
self.editorstack.set_orientation(orientation)
editorsplitter = EditorSplitter(
self.parent(),
self.main_widget,
self.menu_actions,
register_editorstack_cb=self.register_editorstack_cb,
unregister_editorstack_cb=self.unregister_editorstack_cb
)
self.addWidget(editorsplitter)
editorsplitter.destroyed.connect(self.editorsplitter_closed)
current_editor = editorsplitter.editorstack.get_current_editor()
if current_editor is not None:
current_editor.setFocus()
def iter_editorstacks(self):
"""Return the editor stacks for this splitter and every first child.
Note: If a splitter contains more than one splitter as a direct
child, only the first child's editor stack is included.
Returns:
List of tuples containing (EditorStack instance, orientation).
"""
editorstacks = [(self.widget(0), self.orientation())]
if self.count() > 1:
editorsplitter = self.widget(1)
editorstacks += editorsplitter.iter_editorstacks()
return editorstacks
def get_layout_settings(self):
"""Return the layout state for this splitter and its children.
Record the current state, including file names and current line
numbers, of the splitter panels.
Returns:
A dictionary containing keys {hexstate, sizes, splitsettings}.
hexstate: String of saveState() for self.
sizes: List for size() for self.
splitsettings: List of tuples of the form
(orientation, cfname, clines) for each EditorSplitter
and its EditorStack.
orientation: orientation() for the editor
splitter (which may be a child of self).
cfname: EditorStack current file name.
clines: Current line number for each file in the
EditorStack.
"""
splitsettings = []
for editorstack, orientation in self.iter_editorstacks():
clines = []
cfname = ''
# XXX - this overrides value from the loop to always be False?
orientation = False
if hasattr(editorstack, 'data'):
clines = [finfo.editor.get_cursor_line_number()
for finfo in editorstack.data]
cfname = editorstack.get_current_filename()
splitsettings.append((orientation == Qt.Vertical, cfname, clines))
return dict(
hexstate=qbytearray_to_str(self.saveState()),
sizes=self.sizes(),
splitsettings=splitsettings,
)
def set_layout_settings(self, settings, dont_goto=None):
"""Restore layout state for the splitter panels.
Apply the settings to restore a saved layout within the editor. If
the splitsettings key doesn't exist, then return without restoring
any settings.
The current EditorSplitter (self) calls split() for each element
in split_settings, thus recreating the splitter panels from the saved
state. split() also clones the editorstack, which is then
iterated over to restore the saved line numbers on each file.
The size and positioning of each splitter panel is restored from
hexstate.
Args:
settings: A dictionary with keys {hexstate, sizes, orientation}
that define the layout for the EditorSplitter panels.
dont_goto: Defaults to None, which positions the cursor to the
end of the editor. If there's a value, positions the
cursor on the saved line number for each editor.
"""
splitsettings = settings.get('splitsettings')
if splitsettings is None:
return
splitter = self
editor = None
for i, (is_vertical, cfname, clines) in enumerate(splitsettings):
if i > 0:
splitter.split(Qt.Vertical if is_vertical else Qt.Horizontal)
splitter = splitter.widget(1)
editorstack = splitter.widget(0)
for j, finfo in enumerate(editorstack.data):
editor = finfo.editor
# TODO: go_to_line is not working properly (the line it jumps
# to is not the corresponding to that file). This will be fixed
# in a future PR (which will fix spyder-ide/spyder#3857).
if dont_goto is not None:
# Skip go to line for the first file because it is already there.
pass
else:
try:
editor.go_to_line(clines[j])
except IndexError:
pass
hexstate = settings.get('hexstate')
if hexstate is not None:
self.restoreState(
QByteArray().fromHex(str(hexstate).encode('utf-8'))
)
sizes = settings.get('sizes')
if sizes is not None:
self.setSizes(sizes)
if editor is not None:
editor.clearFocus()
editor.setFocus()
@property
def _stylesheet(self):
css = qstylizer.style.StyleSheet()
css.QSplitter.setValues(
background=SpyderPalette.COLOR_BACKGROUND_1
)
return css.toString()
|
EditorSplitter
|
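Note: iter_editorstacks above relies on an invariant worth making explicit: each splitter keeps its stack at index 0 and, once split, a single nested splitter at index 1, so the traversal is a linked-list walk. A plain-Python schematic (class and field names invented for illustration):

class FakeSplitter:
    def __init__(self, stack, child=None, orientation="H"):
        self.stack, self.child, self.orientation = stack, child, orientation

    def iter_editorstacks(self):
        # (stack, orientation) for this splitter, then recurse into the child
        stacks = [(self.stack, self.orientation)]
        if self.child is not None:
            stacks += self.child.iter_editorstacks()
        return stacks

leaf = FakeSplitter("stack-2")
root = FakeSplitter("stack-1", child=leaf, orientation="V")
assert root.iter_editorstacks() == [("stack-1", "V"), ("stack-2", "H")]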
python
|
gevent__gevent
|
src/gevent/ssl.py
|
{
"start": 2727,
"end": 2974
}
|
class ____(socket._gevent_sock_class):
__slots__ = ('_sslsock',)
def __init__(self, family, type, proto, fileno, sslsocket_wref):
super().__init__(family, type, proto, fileno)
self._sslsock = sslsocket_wref
|
_contextawaresock
|
python
|
getsentry__sentry
|
src/sentry/replays/post_process.py
|
{
"start": 856,
"end": 1049
}
|
class ____(TypedDict, total=False):
id: str | None
username: str | None
email: str | None
ip: str | None
display_name: str | None
geo: UserGeoResponseType
|
UserResponseType
|